Annotation of src/sys/arch/x86/x86/intr.c, Revision 1.130

1.130   ! cherry      1: /*     $NetBSD: intr.c,v 1.129 2018/09/14 01:50:51 mrg Exp $   */
1.29      ad          2:
1.117     maxv        3: /*
1.61      ad          4:  * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
1.29      ad          5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Andrew Doran.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
1.1       fvdl       31:
                     32: /*
                     33:  * Copyright 2002 (c) Wasabi Systems, Inc.
                     34:  * All rights reserved.
                     35:  *
                     36:  * Written by Frank van der Linden for Wasabi Systems, Inc.
                     37:  *
                     38:  * Redistribution and use in source and binary forms, with or without
                     39:  * modification, are permitted provided that the following conditions
                     40:  * are met:
                     41:  * 1. Redistributions of source code must retain the above copyright
                     42:  *    notice, this list of conditions and the following disclaimer.
                     43:  * 2. Redistributions in binary form must reproduce the above copyright
                     44:  *    notice, this list of conditions and the following disclaimer in the
                     45:  *    documentation and/or other materials provided with the distribution.
                     46:  * 3. All advertising materials mentioning features or use of this software
                     47:  *    must display the following acknowledgement:
                     48:  *      This product includes software developed for the NetBSD Project by
                     49:  *      Wasabi Systems, Inc.
                     50:  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
                     51:  *    or promote products derived from this software without specific prior
                     52:  *    written permission.
                     53:  *
                     54:  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
                     55:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     56:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     57:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
                     58:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     59:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     60:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     61:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     62:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     63:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     64:  * POSSIBILITY OF SUCH DAMAGE.
                     65:  */
1.4       lukem      66:
1.17      mycroft    67: /*-
                     68:  * Copyright (c) 1991 The Regents of the University of California.
                     69:  * All rights reserved.
                     70:  *
                     71:  * This code is derived from software contributed to Berkeley by
                     72:  * William Jolitz.
                     73:  *
                     74:  * Redistribution and use in source and binary forms, with or without
                     75:  * modification, are permitted provided that the following conditions
                     76:  * are met:
                     77:  * 1. Redistributions of source code must retain the above copyright
                     78:  *    notice, this list of conditions and the following disclaimer.
                     79:  * 2. Redistributions in binary form must reproduce the above copyright
                     80:  *    notice, this list of conditions and the following disclaimer in the
                     81:  *    documentation and/or other materials provided with the distribution.
1.18      wennmach   82:  * 3. Neither the name of the University nor the names of its contributors
                     83:  *    may be used to endorse or promote products derived from this software
                     84:  *    without specific prior written permission.
                     85:  *
                     86:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     87:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     88:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     89:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     90:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     91:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     92:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     93:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     94:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     95:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     96:  * SUCH DAMAGE.
                     97:  *
                     98:  *     @(#)isa.c       7.2 (Berkeley) 5/13/91
                     99:  */
                    100:
                    101: /*-
                    102:  * Copyright (c) 1993, 1994 Charles Hannum.
                    103:  *
                    104:  * Redistribution and use in source and binary forms, with or without
                    105:  * modification, are permitted provided that the following conditions
                    106:  * are met:
                    107:  * 1. Redistributions of source code must retain the above copyright
                    108:  *    notice, this list of conditions and the following disclaimer.
                    109:  * 2. Redistributions in binary form must reproduce the above copyright
                    110:  *    notice, this list of conditions and the following disclaimer in the
                    111:  *    documentation and/or other materials provided with the distribution.
1.17      mycroft   112:  * 3. All advertising materials mentioning features or use of this software
                    113:  *    must display the following acknowledgement:
                    114:  *     This product includes software developed by the University of
                    115:  *     California, Berkeley and its contributors.
                    116:  * 4. Neither the name of the University nor the names of its contributors
                    117:  *    may be used to endorse or promote products derived from this software
                    118:  *    without specific prior written permission.
                    119:  *
                    120:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                    121:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                    122:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                    123:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                    124:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                    125:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                    126:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                    127:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                    128:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                    129:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                    130:  * SUCH DAMAGE.
                    131:  *
                    132:  *     @(#)isa.c       7.2 (Berkeley) 5/13/91
                    133:  */
                    134:
1.4       lukem     135: #include <sys/cdefs.h>
1.130   ! cherry    136: __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.129 2018/09/14 01:50:51 mrg Exp $");
1.1       fvdl      137:
1.60      dyoung    138: #include "opt_intrdebug.h"
1.1       fvdl      139: #include "opt_multiprocessor.h"
1.23      christos  140: #include "opt_acpi.h"
1.1       fvdl      141:
1.26      hubertf   142: #include <sys/param.h>
1.1       fvdl      143: #include <sys/systm.h>
                    144: #include <sys/kernel.h>
                    145: #include <sys/syslog.h>
                    146: #include <sys/device.h>
1.59      yamt      147: #include <sys/kmem.h>
1.1       fvdl      148: #include <sys/proc.h>
                    149: #include <sys/errno.h>
1.36      ad        150: #include <sys/intr.h>
                    151: #include <sys/cpu.h>
                    152: #include <sys/atomic.h>
1.59      yamt      153: #include <sys/xcall.h>
1.87      knakahar  154: #include <sys/interrupt.h>
1.99      knakahar  155: #include <sys/reboot.h> /* for AB_VERBOSE */
1.87      knakahar  156:
                    157: #include <sys/kauth.h>
                    158: #include <sys/conf.h>
1.1       fvdl      159:
1.25      yamt      160: #include <uvm/uvm_extern.h>
                    161:
1.1       fvdl      162: #include <machine/i8259.h>
                    163: #include <machine/pio.h>
                    164:
                    165: #include "ioapic.h"
                    166: #include "lapic.h"
1.8       fvdl      167: #include "pci.h"
1.65      jmcneill  168: #include "acpica.h"
1.1       fvdl      169:
1.65      jmcneill  170: #if NIOAPIC > 0 || NACPICA > 0
1.117     maxv      171: #include <machine/i82093var.h>
1.1       fvdl      172: #include <machine/mpbiosvar.h>
1.23      christos  173: #include <machine/mpacpi.h>
1.1       fvdl      174: #endif
                    175:
                    176: #if NLAPIC > 0
                    177: #include <machine/i82489var.h>
                    178: #endif
                    179:
1.8       fvdl      180: #if NPCI > 0
                    181: #include <dev/pci/ppbreg.h>
                    182: #endif
                    183:
1.81      knakahar  184: #include <x86/pci/msipic.h>
1.85      knakahar  185: #include <x86/pci/pci_msi_machdep.h>
1.81      knakahar  186:
1.83      martin    187: #if NPCI == 0
                    188: #define msipic_is_msi_pic(PIC) (false)
                    189: #endif
                    190:
1.106     cherry    191: #if defined(XEN) /* XXX: Cleanup */
                    192: #include <xen/xen.h>
                    193: #include <xen/hypervisor.h>
                    194: #include <xen/evtchn.h>
                    195: #include <xen/xenfunc.h>
                    196: #endif /* XEN */
                    197:
1.69      cegger    198: #ifdef DDB
                    199: #include <ddb/db_output.h>
                    200: #endif
                    201:
1.79      knakahar  202: #ifdef INTRDEBUG
                    203: #define DPRINTF(msg) printf msg
                    204: #else
                    205: #define DPRINTF(msg)
                    206: #endif
                    207:
1.1       fvdl      208: struct pic softintr_pic = {
1.56      drochner  209:        .pic_name = "softintr_fakepic",
1.15      kochi     210:        .pic_type = PIC_SOFT,
1.23      christos  211:        .pic_vecbase = 0,
                    212:        .pic_apicid = 0,
1.15      kochi     213:        .pic_lock = __SIMPLELOCK_UNLOCKED,
1.1       fvdl      214: };
                    215:
1.78      knakahar  216: static void intr_calculatemasks(struct cpu_info *);
                    217:
1.79      knakahar  218: static SIMPLEQ_HEAD(, intrsource) io_interrupt_sources =
                    219:        SIMPLEQ_HEAD_INITIALIZER(io_interrupt_sources);
                    220:
1.95      knakahar  221: static kmutex_t intr_distribute_lock;
                    222:
1.65      jmcneill  223: #if NIOAPIC > 0 || NACPICA > 0
1.80      knakahar  224: static int intr_scan_bus(int, int, intr_handle_t *);
1.11      fvdl      225: #if NPCI > 0
                    226: static int intr_find_pcibridge(int, pcitag_t *, pci_chipset_tag_t *);
                    227: #endif
1.10      fvdl      228: #endif
1.9       fvdl      229:
1.106     cherry    230: #if !defined(XEN)
1.79      knakahar  231: static int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *,
                    232:                                  struct intrsource *);
1.78      knakahar  233: static int __noinline intr_allocate_slot(struct pic *, int, int,
1.79      knakahar  234:                                         struct cpu_info **, int *, int *,
                    235:                                         struct intrsource *);
1.78      knakahar  236:
                    237: static void intr_source_free(struct cpu_info *, int, struct pic *, int);
                    238:
                    239: static void intr_establish_xcall(void *, void *);
                    240: static void intr_disestablish_xcall(void *, void *);
1.126     jdolecek  241: #endif
1.78      knakahar  242:
1.87      knakahar  243: static const char *legacy_intr_string(int, char *, size_t, struct pic *);
1.126     jdolecek  244:
1.106     cherry    245: #if defined(XEN) /* XXX: nuke conditional after integration */
                    246: static const char *xen_intr_string(int, char *, size_t, struct pic *);
                    247: #endif /* XXX: XEN */
1.87      knakahar  248:
1.122     roy       249: #if defined(INTRSTACKSIZE)
1.78      knakahar  250: static inline bool redzone_const_or_false(bool);
                    251: static inline int redzone_const_or_zero(int);
1.120     roy       252: #endif
1.78      knakahar  253:
                    254: static void intr_redistribute_xc_t(void *, void *);
                    255: static void intr_redistribute_xc_s1(void *, void *);
                    256: static void intr_redistribute_xc_s2(void *, void *);
                    257: static bool intr_redistribute(struct cpu_info *);
                    258:
1.81      knakahar  259: static struct intrsource *intr_get_io_intrsource(const char *);
                    260: static void intr_free_io_intrsource_direct(struct intrsource *);
1.106     cherry    261: #if !defined(XEN)
1.81      knakahar  262: static int intr_num_handlers(struct intrsource *);
                    263:
1.79      knakahar  264: static int intr_find_unused_slot(struct cpu_info *, int *);
                    265: static void intr_activate_xcall(void *, void *);
                    266: static void intr_deactivate_xcall(void *, void *);
                    267: static void intr_get_affinity(struct intrsource *, kcpuset_t *);
                    268: static int intr_set_affinity(struct intrsource *, const kcpuset_t *);
1.106     cherry    269: #endif /* XEN */
1.79      knakahar  270:
1.1       fvdl      271: /*
                    272:  * Fill in default interrupt table (in case of spurious interrupt
                    273:  * during configuration of kernel), setup interrupt control unit
                    274:  */
                    275: void
                    276: intr_default_setup(void)
                    277: {
1.106     cherry    278: #if !defined(XEN)
1.1       fvdl      279:        int i;
                    280:
                    281:        /* icu vectors */
                    282:        for (i = 0; i < NUM_LEGACY_IRQS; i++) {
1.38      yamt      283:                idt_vec_reserve(ICU_OFFSET + i);
1.123     maxv      284:                idt_vec_set(ICU_OFFSET + i, legacy_stubs[i].ist_entry);
1.1       fvdl      285:        }
                    286:
                    287:        /*
                    288:         * Eventually might want to check if it's actually there.
                    289:         */
                    290:        i8259_default_setup();
1.95      knakahar  291:
1.106     cherry    292: #else
                    293:        events_default_setup();
                    294: #endif /* !XEN */
1.95      knakahar  295:        mutex_init(&intr_distribute_lock, MUTEX_DEFAULT, IPL_NONE);
1.1       fvdl      296: }
                    297:
                    298: /*
                    299:  * Handle an NMI, possibly a machine check.
                    300:  * Currently this just logs the event.
                    301:  */
1.71      dyoung    302: void
1.1       fvdl      303: x86_nmi(void)
                    304: {
1.61      ad        305:
1.1       fvdl      306:        log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70));
                    307: }
                    308:
                    309: /*
                    310:  * Recalculate the interrupt masks from scratch.
1.61      ad        311:  * During early boot, anything goes and we are always called on the BP.
                    312:  * When the system is up and running:
                    313:  *
                    314:  * => called with ci == curcpu()
                    315:  * => cpu_lock held by the initiator
                    316:  * => interrupts disabled on-chip (PSL_I)
                    317:  *
                    318:  * Do not call printf(), kmem_free() or other "heavyweight" routines
                    319:  * from here.  This routine must be quick and must not block.
1.1       fvdl      320:  */
1.49      joerg     321: static void
1.1       fvdl      322: intr_calculatemasks(struct cpu_info *ci)
                    323: {
                    324:        int irq, level, unusedirqs, intrlevel[MAX_INTR_SOURCES];
                    325:        struct intrhand *q;
                    326:
                    327:        /* First, figure out which levels each IRQ uses. */
                    328:        unusedirqs = 0xffffffff;
                    329:        for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
                    330:                int levels = 0;
                    331:
                    332:                if (ci->ci_isources[irq] == NULL) {
                    333:                        intrlevel[irq] = 0;
                    334:                        continue;
                    335:                }
                    336:                for (q = ci->ci_isources[irq]->is_handlers; q; q = q->ih_next)
1.127     kamil     337:                        levels |= 1U << q->ih_level;
1.1       fvdl      338:                intrlevel[irq] = levels;
                    339:                if (levels)
1.127     kamil     340:                        unusedirqs &= ~(1U << irq);
1.1       fvdl      341:        }
                    342:
                    343:        /* Then figure out which IRQs use each level. */
                    344:        for (level = 0; level < NIPL; level++) {
                    345:                int irqs = 0;
                    346:                for (irq = 0; irq < MAX_INTR_SOURCES; irq++)
1.127     kamil     347:                        if (intrlevel[irq] & (1U << level))
                    348:                                irqs |= 1U << irq;
1.1       fvdl      349:                ci->ci_imask[level] = irqs | unusedirqs;
                    350:        }
                    351:
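                                 /*
                                  * Make the masks cumulative: raising the SPL to a given IPL must
                                  * block everything already blocked at the lower IPLs, so fold
                                  * each level's mask into the one above it.
                                  */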
                    352:        for (level = 0; level<(NIPL-1); level++)
                    353:                ci->ci_imask[level+1] |= ci->ci_imask[level];
                    354:
                    355:        for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
                    356:                int maxlevel = IPL_NONE;
                    357:                int minlevel = IPL_HIGH;
                    358:
                    359:                if (ci->ci_isources[irq] == NULL)
                    360:                        continue;
                    361:                for (q = ci->ci_isources[irq]->is_handlers; q;
                    362:                     q = q->ih_next) {
                    363:                        if (q->ih_level < minlevel)
                    364:                                minlevel = q->ih_level;
                    365:                        if (q->ih_level > maxlevel)
                    366:                                maxlevel = q->ih_level;
                    367:                }
                    368:                ci->ci_isources[irq]->is_maxlevel = maxlevel;
                    369:                ci->ci_isources[irq]->is_minlevel = minlevel;
                    370:        }
                    371:
                    372:        for (level = 0; level < NIPL; level++)
                    373:                ci->ci_iunmask[level] = ~ci->ci_imask[level];
                    374: }
                    375:
1.11      fvdl      376: /*
                    377:  * List to keep track of PCI buses that are probed but not known
1.117     maxv      378:  * to the firmware. Used to look up the parent bridge tag and chipset
1.11      fvdl      379:  * tag when routing interrupts for such buses.
                    380:  * XXX should maintain one list, not an array and a linked list.
                    381:  */
1.65      jmcneill  382: #if (NPCI > 0) && ((NIOAPIC > 0) || NACPICA > 0)
1.11      fvdl      383: struct intr_extra_bus {
                    384:        int bus;
                    385:        pcitag_t *pci_bridge_tag;
                    386:        pci_chipset_tag_t pci_chipset_tag;
                    387:        LIST_ENTRY(intr_extra_bus) list;
                    388: };
                    389:
                    390: LIST_HEAD(, intr_extra_bus) intr_extra_buses =
                    391:     LIST_HEAD_INITIALIZER(intr_extra_buses);
                    392:
                    393:
                    394: void
                    395: intr_add_pcibus(struct pcibus_attach_args *pba)
                    396: {
                    397:        struct intr_extra_bus *iebp;
                    398:
1.61      ad        399:        iebp = kmem_alloc(sizeof(*iebp), KM_SLEEP);
1.11      fvdl      400:        iebp->bus = pba->pba_bus;
                    401:        iebp->pci_chipset_tag = pba->pba_pc;
1.13      fvdl      402:        iebp->pci_bridge_tag = pba->pba_bridgetag;
1.11      fvdl      403:        LIST_INSERT_HEAD(&intr_extra_buses, iebp, list);
                    404: }
                    405:
                    406: static int
                    407: intr_find_pcibridge(int bus, pcitag_t *pci_bridge_tag,
1.67      dyoung    408:                    pci_chipset_tag_t *pc)
1.11      fvdl      409: {
                    410:        struct intr_extra_bus *iebp;
                    411:        struct mp_bus *mpb;
                    412:
                    413:        if (bus < 0)
                    414:                return ENOENT;
                    415:
                    416:        if (bus < mp_nbus) {
                    417:                mpb = &mp_busses[bus];
                    418:                if (mpb->mb_pci_bridge_tag == NULL)
                    419:                        return ENOENT;
                    420:                *pci_bridge_tag = *mpb->mb_pci_bridge_tag;
1.67      dyoung    421:                *pc = mpb->mb_pci_chipset_tag;
1.11      fvdl      422:                return 0;
                    423:        }
                    424:
                    425:        LIST_FOREACH(iebp, &intr_extra_buses, list) {
                    426:                if (iebp->bus == bus) {
1.14      yamt      427:                        if (iebp->pci_bridge_tag == NULL)
                    428:                                return ENOENT;
1.11      fvdl      429:                        *pci_bridge_tag = *iebp->pci_bridge_tag;
1.67      dyoung    430:                        *pc = iebp->pci_chipset_tag;
1.11      fvdl      431:                        return 0;
                    432:                }
                    433:        }
                    434:        return ENOENT;
                    435: }
                    436: #endif
                    437:
1.65      jmcneill  438: #if NIOAPIC > 0 || NACPICA > 0
1.73      yamt      439: /*
1.74      yamt      440:  * The 'pin' argument is the PCI bus_pin encoding of a device/pin combination.
1.73      yamt      441:  */
1.1       fvdl      442: int
1.80      knakahar  443: intr_find_mpmapping(int bus, int pin, intr_handle_t *handle)
1.1       fvdl      444: {
                    445:
1.11      fvdl      446: #if NPCI > 0
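                                 /*
                                  * If the MP tables do not describe this bus directly, walk up
                                  * through PCI-PCI bridges: swizzle the device/pin pair at each
                                  * hop and retry with the parent bus until a known bus is found.
                                  */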
                    447:        while (intr_scan_bus(bus, pin, handle) != 0) {
1.73      yamt      448:                int dev, func;
                    449:                pcitag_t pci_bridge_tag;
                    450:                pci_chipset_tag_t pc;
                    451:
                    452:                if (intr_find_pcibridge(bus, &pci_bridge_tag, &pc) != 0)
1.8       fvdl      453:                        return ENOENT;
1.11      fvdl      454:                dev = pin >> 2;
                    455:                pin = pin & 3;
1.16      kochi     456:                pin = PPB_INTERRUPT_SWIZZLE(pin + 1, dev) - 1;
1.73      yamt      457:                pci_decompose_tag(pc, pci_bridge_tag, &bus, &dev, &func);
1.9       fvdl      458:                pin |= (dev << 2);
                    459:        }
                    460:        return 0;
1.11      fvdl      461: #else
                    462:        return intr_scan_bus(bus, pin, handle);
                    463: #endif
1.9       fvdl      464: }
                    465:
                    466: static int
1.80      knakahar  467: intr_scan_bus(int bus, int pin, intr_handle_t *handle)
1.9       fvdl      468: {
1.11      fvdl      469:        struct mp_intr_map *mip, *intrs;
                    470:
                    471:        if (bus < 0 || bus >= mp_nbus)
                    472:                return ENOENT;
1.9       fvdl      473:
1.11      fvdl      474:        intrs = mp_busses[bus].mb_intrs;
1.9       fvdl      475:        if (intrs == NULL)
                    476:                return ENOENT;
                    477:
1.11      fvdl      478:        for (mip = intrs; mip != NULL; mip = mip->next) {
1.1       fvdl      479:                if (mip->bus_pin == pin) {
1.65      jmcneill  480: #if NACPICA > 0
1.23      christos  481:                        if (mip->linkdev != NULL)
                    482:                                if (mpacpi_findintr_linkdev(mip) != 0)
                    483:                                        continue;
                    484: #endif
1.1       fvdl      485:                        *handle = mip->ioapic_ih;
                    486:                        return 0;
                    487:                }
                    488:        }
                    489:        return ENOENT;
                    490: }
                    491: #endif
                    492:
1.79      knakahar  493: /*
                    494:  * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
                    495:  * by MI code and intrctl(8).
                    496:  */
1.114     knakahar  497: const char *
                    498: intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
1.79      knakahar  499: {
1.106     cherry    500:        int ih = 0;
1.79      knakahar  501:
1.83      martin    502: #if NPCI > 0
1.81      knakahar  503:        if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
                    504:                uint64_t pih;
                    505:                int dev, vec;
                    506:
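                                         /*
                                          * Pack the MSI device id and vector into a 64-bit handle,
                                          * then tag it as MSI or MSI-X below.
                                          */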
                    507:                dev = msipic_get_devid(pic);
                    508:                vec = pin;
                    509:                pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
                    510:                        | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
                    511:                        | APIC_INT_VIA_MSI;
                    512:                if (pic->pic_type == PIC_MSI)
                    513:                        MSI_INT_MAKE_MSI(pih);
                    514:                else if (pic->pic_type == PIC_MSIX)
                    515:                        MSI_INT_MAKE_MSIX(pih);
                    516:
1.85      knakahar  517:                return x86_pci_msi_string(NULL, pih, buf, len);
1.81      knakahar  518:        }
1.83      martin    519: #endif
1.81      knakahar  520:
1.106     cherry    521: #if defined(XEN)
                    522:        evtchn_port_t port = pin; /* Port number */
                    523:
                    524:        if (pic->pic_type == PIC_XEN) {
                    525:                ih = pin;
                    526:                return xen_intr_string(port, buf, len, pic);
                    527:        }
                    528: #endif
                    529:
1.79      knakahar  530:        /*
                    531:         * If the device is PCI, "legacy_irq" is always -1. The least
                    532:         * significant 8 bits of "ih" are only used in intr_string() to show
                    533:         * the irq number. If the device is "legacy" (such as a floppy), it
                    534:         * should not use intr_string().
                    535:         */
                    536:        if (pic->pic_type == PIC_I8259) {
                    537:                ih = legacy_irq;
                    538:                return legacy_intr_string(ih, buf, len, pic);
                    539:        }
1.81      knakahar  540:
1.106     cherry    541: #if NIOAPIC > 0 || NACPICA > 0
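                                 /*
                                  * Pack the APIC id and pin into the handle; the raw pin is also
                                  * kept in the low bits so intr_string() can print the irq number.
                                  */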
1.81      knakahar  542:        ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
                    543:            | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
                    544:        if (pic->pic_type == PIC_IOAPIC) {
                    545:                ih |= APIC_INT_VIA_APIC;
                    546:        }
                    547:        ih |= pin;
                    548:        return intr_string(ih, buf, len);
1.106     cherry    549: #endif
                    550:
                    551:        return NULL; /* No pic found! */
1.79      knakahar  552: }
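                          /*
                           * Typical use (illustrative sketch; "pic" and "pin" stand in for a real
                           * struct pic pointer and pin number): callers pass a buffer of at least
                           * INTRIDBUF bytes, as intr_establish_xname() does below, and get back an
                           * id such as "ioapic0 pin 9", or NULL if the pic type is unknown:
                           *
                           *      char idbuf[INTRIDBUF];
                           *      const char *id;
                           *
                           *      id = intr_create_intrid(-1, pic, pin, idbuf, sizeof(idbuf));
                           *      if (id == NULL)
                           *              panic("unknown pic type");
                           */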
                    553:
                    554: /*
                    555:  * Find intrsource from io_interrupt_sources list.
                    556:  */
                    557: static struct intrsource *
                    558: intr_get_io_intrsource(const char *intrid)
                    559: {
                    560:        struct intrsource *isp;
                    561:
                    562:        KASSERT(mutex_owned(&cpu_lock));
                    563:
                    564:        SIMPLEQ_FOREACH(isp, &io_interrupt_sources, is_list) {
                    565:                KASSERT(isp->is_intrid != NULL);
1.81      knakahar  566:                if (strncmp(intrid, isp->is_intrid, INTRIDBUF - 1) == 0)
1.79      knakahar  567:                        return isp;
                    568:        }
                    569:        return NULL;
                    570: }
                    571:
                    572: /*
                    573:  * Allocate intrsource and add to io_interrupt_sources list.
                    574:  */
                    575: struct intrsource *
                    576: intr_allocate_io_intrsource(const char *intrid)
                    577: {
                    578:        CPU_INFO_ITERATOR cii;
                    579:        struct cpu_info *ci;
                    580:        struct intrsource *isp;
                    581:        struct percpu_evcnt *pep;
                    582:
                    583:        KASSERT(mutex_owned(&cpu_lock));
                    584:
                    585:        if (intrid == NULL)
                    586:                return NULL;
                    587:
                    588:        isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
                    589:        pep = kmem_zalloc(sizeof(*pep) * ncpu, KM_SLEEP);
                    590:        isp->is_saved_evcnt = pep;
                    591:        for (CPU_INFO_FOREACH(cii, ci)) {
                    592:                pep->cpuid = ci->ci_cpuid;
                    593:                pep++;
                    594:        }
1.84      christos  595:        strlcpy(isp->is_intrid, intrid, sizeof(isp->is_intrid));
1.79      knakahar  596:
                    597:        SIMPLEQ_INSERT_TAIL(&io_interrupt_sources, isp, is_list);
                    598:
                    599:        return isp;
                    600: }
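                          /*
                           * Illustrative calling convention for MI code ("intrid" is a placeholder
                           * id string): cpu_lock must be held across the call, as the KASSERT
                           * above enforces.
                           *
                           *      mutex_enter(&cpu_lock);
                           *      isp = intr_allocate_io_intrsource(intrid);
                           *      mutex_exit(&cpu_lock);
                           */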
                    601:
                    602: /*
                    603:  * Remove from io_interrupt_sources list and free by the intrsource pointer.
                    604:  */
                    605: static void
                    606: intr_free_io_intrsource_direct(struct intrsource *isp)
                    607: {
                    608:        KASSERT(mutex_owned(&cpu_lock));
                    609:
                    610:        SIMPLEQ_REMOVE(&io_interrupt_sources, isp, intrsource, is_list);
                    611:
                    612:        /* Is this interrupt established? */
1.82      roy       613:        if (isp->is_evname[0] != '\0')
1.79      knakahar  614:                evcnt_detach(&isp->is_evcnt);
                    615:
                    616:        kmem_free(isp->is_saved_evcnt,
                    617:            sizeof(*(isp->is_saved_evcnt)) * ncpu);
1.81      knakahar  618:
1.79      knakahar  619:        kmem_free(isp, sizeof(*isp));
                    620: }
                    621:
                    622: /*
                    623:  * Remove from io_interrupt_sources list and free by the interrupt id.
                    624:  * This function can be used by MI code.
                    625:  */
                    626: void
                    627: intr_free_io_intrsource(const char *intrid)
                    628: {
                    629:        struct intrsource *isp;
                    630:
                    631:        KASSERT(mutex_owned(&cpu_lock));
                    632:
                    633:        if (intrid == NULL)
                    634:                return;
                    635:
                    636:        if ((isp = intr_get_io_intrsource(intrid)) == NULL) {
                    637:                return;
                    638:        }
                    639:
1.92      hannken   640:        /* If the interrupt uses a shared IRQ, don't free it yet. */
                    641:        if (isp->is_handlers != NULL) {
                    642:                return;
                    643:        }
                    644:
1.79      knakahar  645:        intr_free_io_intrsource_direct(isp);
                    646: }
                    647:
1.106     cherry    648: #if !defined(XEN)
1.49      joerg     649: static int
1.1       fvdl      650: intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin,
1.79      knakahar  651:                       int *index, struct intrsource *chained)
1.1       fvdl      652: {
1.53      joerg     653:        int slot, i;
1.1       fvdl      654:        struct intrsource *isp;
                    655:
1.61      ad        656:        KASSERT(mutex_owned(&cpu_lock));
                    657:
1.53      joerg     658:        if (pic == &i8259_pic) {
1.61      ad        659:                KASSERT(CPU_IS_PRIMARY(ci));
1.53      joerg     660:                slot = pin;
                    661:        } else {
1.81      knakahar  662:                int start = 0;
1.53      joerg     663:                slot = -1;
1.1       fvdl      664:
1.81      knakahar  665:                /* avoid reserved slots for legacy interrupts. */
                    666:                if (CPU_IS_PRIMARY(ci) && msipic_is_msi_pic(pic))
                    667:                        start = NUM_LEGACY_IRQS;
1.53      joerg     668:                /*
                    669:                 * intr_allocate_slot has checked for an existing mapping.
                    670:                 * Now look for a free slot.
                    671:                 */
1.81      knakahar  672:                for (i = start; i < MAX_INTR_SOURCES ; i++) {
1.53      joerg     673:                        if (ci->ci_isources[i] == NULL) {
                    674:                                slot = i;
                    675:                                break;
                    676:                        }
1.1       fvdl      677:                }
1.53      joerg     678:                if (slot == -1) {
                    679:                        return EBUSY;
1.1       fvdl      680:                }
                    681:        }
                    682:
                    683:        isp = ci->ci_isources[slot];
                    684:        if (isp == NULL) {
1.81      knakahar  685:                const char *via;
                    686:
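                                         /*
                                          * First handler for this slot: install the shared intrsource
                                          * ("chained", already on the io_interrupt_sources list) and
                                          * attach its event counter under the pic's name.
                                          */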
1.79      knakahar  687:                isp = chained;
                    688:                KASSERT(isp != NULL);
1.81      knakahar  689:                if (pic->pic_type == PIC_MSI || pic->pic_type == PIC_MSIX)
                    690:                        via = "vec";
                    691:                else
                    692:                        via = "pin";
1.1       fvdl      693:                snprintf(isp->is_evname, sizeof (isp->is_evname),
1.81      knakahar  694:                    "%s %d", via, pin);
1.1       fvdl      695:                evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL,
1.56      drochner  696:                    pic->pic_name, isp->is_evname);
1.79      knakahar  697:                isp->is_active_cpu = ci->ci_cpuid;
1.1       fvdl      698:                ci->ci_isources[slot] = isp;
                    699:        }
                    700:
                    701:        *index = slot;
                    702:        return 0;
                    703: }
                    704:
                    705: /*
                    706:  * A simple round-robin allocator to assign interrupts to CPUs.
                    707:  */
1.61      ad        708: static int __noinline
1.53      joerg     709: intr_allocate_slot(struct pic *pic, int pin, int level,
1.79      knakahar  710:                   struct cpu_info **cip, int *index, int *idt_slot,
                    711:                   struct intrsource *chained)
1.1       fvdl      712: {
                    713:        CPU_INFO_ITERATOR cii;
1.61      ad        714:        struct cpu_info *ci, *lci;
1.1       fvdl      715:        struct intrsource *isp;
1.64      mrg       716:        int slot = 0, idtvec, error;
1.1       fvdl      717:
1.61      ad        718:        KASSERT(mutex_owned(&cpu_lock));
                    719:
1.53      joerg     720:        /* First check if this pin is already used by an interrupt vector. */
                    721:        for (CPU_INFO_FOREACH(cii, ci)) {
1.7       skd       722:                for (slot = 0 ; slot < MAX_INTR_SOURCES ; slot++) {
1.61      ad        723:                        if ((isp = ci->ci_isources[slot]) == NULL) {
1.53      joerg     724:                                continue;
1.61      ad        725:                        }
1.72      drochner  726:                        if (isp->is_pic == pic &&
                    727:                            pin != -1 && isp->is_pin == pin) {
1.53      joerg     728:                                *idt_slot = isp->is_idtvec;
                    729:                                *index = slot;
                    730:                                *cip = ci;
                    731:                                return 0;
1.52      ad        732:                        }
                    733:                }
1.53      joerg     734:        }
1.1       fvdl      735:
1.53      joerg     736:        /*
                    737:         * The pic/pin combination doesn't have an existing mapping.
1.61      ad        738:         * Find a slot for a new interrupt source.  For the i8259 case,
                    739:         * we always use reserved slots of the primary CPU.  Otherwise,
                    740:         * we make an attempt to balance the interrupt load.
1.53      joerg     741:         *
                    742:         * PIC and APIC usage are essentially exclusive, so the reservation
                    743:         * of the ISA slots is ignored when assigning IOAPIC slots.
                    744:         */
1.61      ad        745:        if (pic == &i8259_pic) {
                    746:                /*
                    747:                 * Must be directed to BP.
                    748:                 */
                    749:                ci = &cpu_info_primary;
1.79      knakahar  750:                error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained);
1.61      ad        751:        } else {
1.1       fvdl      752:                /*
1.61      ad        753:                 * Find least loaded AP/BP and try to allocate there.
1.1       fvdl      754:                 */
1.61      ad        755:                ci = NULL;
                    756:                for (CPU_INFO_FOREACH(cii, lci)) {
                    757:                        if ((lci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
1.1       fvdl      758:                                continue;
1.61      ad        759:                        }
1.62      ad        760: #if 0
1.61      ad        761:                        if (ci == NULL ||
                    762:                            ci->ci_nintrhand > lci->ci_nintrhand) {
                    763:                                ci = lci;
                    764:                        }
1.62      ad        765: #else
                    766:                        ci = &cpu_info_primary;
                    767: #endif
1.61      ad        768:                }
                    769:                KASSERT(ci != NULL);
1.79      knakahar  770:                error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained);
1.61      ad        771:
                    772:                /*
                    773:                 * If that did not work, allocate anywhere.
                    774:                 */
                    775:                if (error != 0) {
                    776:                        for (CPU_INFO_FOREACH(cii, ci)) {
                    777:                                if ((ci->ci_schedstate.spc_flags &
                    778:                                    SPCF_NOINTR) != 0) {
                    779:                                        continue;
                    780:                                }
                    781:                                error = intr_allocate_slot_cpu(ci, pic,
1.79      knakahar  782:                                    pin, &slot, chained);
1.61      ad        783:                                if (error == 0) {
                    784:                                        break;
                    785:                                }
                    786:                        }
1.1       fvdl      787:                }
1.53      joerg     788:        }
1.61      ad        789:        if (error != 0) {
                    790:                return error;
                    791:        }
                    792:        KASSERT(ci != NULL);
1.53      joerg     793:
1.117     maxv      794:        /*
1.61      ad        795:         * Now allocate an IDT vector.
                    796:         * For the 8259 these are reserved up front.
                    797:         */
                    798:        if (pic == &i8259_pic) {
1.53      joerg     799:                idtvec = ICU_OFFSET + pin;
1.61      ad        800:        } else {
1.53      joerg     801:                idtvec = idt_vec_alloc(APIC_LEVEL(level), IDT_INTR_HIGH);
1.61      ad        802:        }
1.53      joerg     803:        if (idtvec == 0) {
                    804:                evcnt_detach(&ci->ci_isources[slot]->is_evcnt);
                    805:                ci->ci_isources[slot] = NULL;
1.52      ad        806:                return EBUSY;
1.50      joerg     807:        }
1.53      joerg     808:        ci->ci_isources[slot]->is_idtvec = idtvec;
1.1       fvdl      809:        *idt_slot = idtvec;
                    810:        *index = slot;
                    811:        *cip = ci;
                    812:        return 0;
                    813: }
                    814:
1.61      ad        815: static void
                    816: intr_source_free(struct cpu_info *ci, int slot, struct pic *pic, int idtvec)
                    817: {
                    818:        struct intrsource *isp;
                    819:
                    820:        isp = ci->ci_isources[slot];
                    821:
                    822:        if (isp->is_handlers != NULL)
                    823:                return;
                    824:        ci->ci_isources[slot] = NULL;
                    825:        if (pic != &i8259_pic)
                    826:                idt_vec_free(idtvec);
                    827: }
                    828:
1.19      yamt      829: #ifdef MULTIPROCESSOR
                    830: static int intr_biglock_wrapper(void *);
                    831:
                    832: /*
                    833:  * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
                    834:  */
                    835:
                    836: static int
                    837: intr_biglock_wrapper(void *vp)
                    838: {
                    839:        struct intrhand *ih = vp;
                    840:        int ret;
                    841:
1.27      ad        842:        KERNEL_LOCK(1, NULL);
1.19      yamt      843:
                    844:        ret = (*ih->ih_realfun)(ih->ih_realarg);
                    845:
1.27      ad        846:        KERNEL_UNLOCK_ONE(NULL);
1.19      yamt      847:
                    848:        return ret;
                    849: }
                    850: #endif /* MULTIPROCESSOR */
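                          /*
                           * Sketch of how intr_biglock_wrapper() is interposed for handlers that
                           * are not MP-safe (illustrative; ih_fun/ih_arg are assumed to be the
                           * dispatch fields of struct intrhand, which this excerpt does not show
                           * being set):
                           *
                           *      ih->ih_realfun = handler;
                           *      ih->ih_realarg = arg;
                           *      ih->ih_fun = handler;
                           *      ih->ih_arg = arg;
                           *      #ifdef MULTIPROCESSOR
                           *      if (!mpsafe) {
                           *              ih->ih_fun = intr_biglock_wrapper;
                           *              ih->ih_arg = ih;  /* wrapper recovers realfun/realarg */
                           *      }
                           *      #endif
                           */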
1.106     cherry    851: #endif /* XEN */
1.19      yamt      852:
1.106     cherry    853: #if defined(DOM0OPS) || !defined(XEN)
1.23      christos  854: struct pic *
                    855: intr_findpic(int num)
                    856: {
                    857: #if NIOAPIC > 0
1.56      drochner  858:        struct ioapic_softc *pic;
1.23      christos  859:
1.56      drochner  860:        pic = ioapic_find_bybase(num);
1.23      christos  861:        if (pic != NULL)
1.56      drochner  862:                return &pic->sc_pic;
1.23      christos  863: #endif
                    864:        if (num < NUM_LEGACY_IRQS)
                    865:                return &i8259_pic;
                    866:
                    867:        return NULL;
                    868: }
1.106     cherry    869: #endif
1.126     jdolecek  870:
1.106     cherry    871: #if !defined(XEN)
1.61      ad        872: /*
1.87      knakahar  873:  * Append a device name to the intrsource. If device A and device B share an
                    874:  * IRQ number, the device name of the interrupt id is "device A, device B".
                    875:  */
                    876: static void
                    877: intr_append_intrsource_xname(struct intrsource *isp, const char *xname)
                    878: {
                    879:
                    880:        if (isp->is_xname[0] != '\0')
                    881:                strlcat(isp->is_xname, ", ", sizeof(isp->is_xname));
                    882:        strlcat(isp->is_xname, xname, sizeof(isp->is_xname));
                    883: }
                    884:
                    885: /*
1.61      ad        886:  * Handle per-CPU component of interrupt establish.
                    887:  *
                    888:  * => caller (on initiating CPU) holds cpu_lock on our behalf
                    889:  * => arg1: struct intrhand *ih
                    890:  * => arg2: int idt_vec
                    891:  */
                    892: static void
                    893: intr_establish_xcall(void *arg1, void *arg2)
                    894: {
                    895:        struct intrsource *source;
                    896:        struct intrstub *stubp;
                    897:        struct intrhand *ih;
                    898:        struct cpu_info *ci;
                    899:        int idt_vec;
                    900:        u_long psl;
                    901:
                    902:        ih = arg1;
                    903:
                    904:        KASSERT(ih->ih_cpu == curcpu() || !mp_online);
                    905:
                    906:        ci = ih->ih_cpu;
                    907:        source = ci->ci_isources[ih->ih_slot];
                    908:        idt_vec = (int)(intptr_t)arg2;
                    909:
                    910:        /* Disable interrupts locally. */
                    911:        psl = x86_read_psl();
                    912:        x86_disable_intr();
                    913:
                    914:        /* Link in the handler and re-calculate masks. */
                    915:        *(ih->ih_prevp) = ih;
                    916:        intr_calculatemasks(ci);
                    917:
                    918:        /* Hook in new IDT vector and SPL state. */
                    919:        if (source->is_resume == NULL || source->is_idtvec != idt_vec) {
                    920:                if (source->is_idtvec != 0 && source->is_idtvec != idt_vec)
                    921:                        idt_vec_free(source->is_idtvec);
                    922:                source->is_idtvec = idt_vec;
                    923:                if (source->is_type == IST_LEVEL) {
                    924:                        stubp = &source->is_pic->pic_level_stubs[ih->ih_slot];
                    925:                } else {
                    926:                        stubp = &source->is_pic->pic_edge_stubs[ih->ih_slot];
                    927:                }
                    928:                source->is_resume = stubp->ist_resume;
                    929:                source->is_recurse = stubp->ist_recurse;
1.102     maxv      930:                idt_vec_set(idt_vec, stubp->ist_entry);
1.61      ad        931:        }
                    932:
                    933:        /* Re-enable interrupts locally. */
                    934:        x86_write_psl(psl);
                    935: }
                    936:
1.1       fvdl      937: void *
1.87      knakahar  938: intr_establish_xname(int legacy_irq, struct pic *pic, int pin, int type,
                    939:                     int level, int (*handler)(void *), void *arg,
                    940:                     bool known_mpsafe, const char *xname)
1.1       fvdl      941: {
                    942:        struct intrhand **p, *q, *ih;
                    943:        struct cpu_info *ci;
                    944:        int slot, error, idt_vec;
1.79      knakahar  945:        struct intrsource *chained, *source;
1.19      yamt      946: #ifdef MULTIPROCESSOR
1.55      ad        947:        bool mpsafe = (known_mpsafe || level != IPL_VM);
1.19      yamt      948: #endif /* MULTIPROCESSOR */
1.61      ad        949:        uint64_t where;
1.79      knakahar  950:        const char *intrstr;
                    951:        char intrstr_buf[INTRIDBUF];
1.1       fvdl      952:
1.97      riastrad  953:        KASSERTMSG((legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < 16)),
                    954:            "bad legacy IRQ value: %d", legacy_irq);
                    955:        KASSERTMSG((legacy_irq != -1 || pic != &i8259_pic),
                    956:            "non-legacy IRQ on i8259");
1.1       fvdl      957:
1.61      ad        958:        ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
1.114     knakahar  959:        intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
1.79      knakahar  960:            sizeof(intrstr_buf));
                    961:        KASSERT(intrstr != NULL);
                    962:
1.61      ad        963:        mutex_enter(&cpu_lock);
1.79      knakahar  964:
                    965:        /* allocate intrsource pool, if not yet. */
                    966:        chained = intr_get_io_intrsource(intrstr);
                    967:        if (chained == NULL) {
1.81      knakahar  968:                if (msipic_is_msi_pic(pic)) {
                    969:                        mutex_exit(&cpu_lock);
1.96      maxv      970:                        kmem_free(ih, sizeof(*ih));
1.81      knakahar  971:                        printf("%s: %s has no intrsource\n", __func__, intrstr);
                    972:                        return NULL;
                    973:                }
1.79      knakahar  974:                chained = intr_allocate_io_intrsource(intrstr);
                    975:                if (chained == NULL) {
                    976:                        mutex_exit(&cpu_lock);
1.96      maxv      977:                        kmem_free(ih, sizeof(*ih));
1.79      knakahar  978:                        printf("%s: can't allocate io_intrsource\n", __func__);
                    979:                        return NULL;
                    980:                }
                    981:        }
                    982:
                    983:        error = intr_allocate_slot(pic, pin, level, &ci, &slot, &idt_vec,
                    984:            chained);
1.1       fvdl      985:        if (error != 0) {
1.79      knakahar  986:                intr_free_io_intrsource_direct(chained);
1.61      ad        987:                mutex_exit(&cpu_lock);
                    988:                kmem_free(ih, sizeof(*ih));
1.1       fvdl      989:                printf("failed to allocate interrupt slot for PIC %s pin %d\n",
1.56      drochner  990:                    pic->pic_name, pin);
1.1       fvdl      991:                return NULL;
                    992:        }
                    993:
                    994:        source = ci->ci_isources[slot];
                    995:
                    996:        if (source->is_handlers != NULL &&
                    997:            source->is_pic->pic_type != pic->pic_type) {
1.79      knakahar  998:                intr_free_io_intrsource_direct(chained);
1.61      ad        999:                mutex_exit(&cpu_lock);
                   1000:                kmem_free(ih, sizeof(*ih));
1.70      tsutsui  1001:                printf("%s: can't share intr source between "
1.1       fvdl     1002:                       "different PIC types (legacy_irq %d pin %d slot %d)\n",
1.70      tsutsui  1003:                    __func__, legacy_irq, pin, slot);
1.1       fvdl     1004:                return NULL;
                   1005:        }
                   1006:
                   1007:        source->is_pin = pin;
                   1008:        source->is_pic = pic;
1.87      knakahar 1009:        intr_append_intrsource_xname(source, xname);
1.1       fvdl     1010:        switch (source->is_type) {
                   1011:        case IST_NONE:
                   1012:                source->is_type = type;
                   1013:                break;
                   1014:        case IST_EDGE:
                   1015:        case IST_LEVEL:
                   1016:                if (source->is_type == type)
                   1017:                        break;
1.61      ad       1018:                /* FALLTHROUGH */
1.1       fvdl     1019:        case IST_PULSE:
                   1020:                if (type != IST_NONE) {
1.79      knakahar 1021:                        intr_source_free(ci, slot, pic, idt_vec);
                   1022:                        intr_free_io_intrsource_direct(chained);
1.61      ad       1023:                        mutex_exit(&cpu_lock);
                   1024:                        kmem_free(ih, sizeof(*ih));
1.70      tsutsui  1025:                        printf("%s: pic %s pin %d: can't share "
                   1026:                               "type %d with %d\n",
                   1027:                                __func__, pic->pic_name, pin,
1.1       fvdl     1028:                                source->is_type, type);
                   1029:                        return NULL;
                   1030:                }
                   1031:                break;
                   1032:        default:
1.70      tsutsui  1033:                panic("%s: bad intr type %d for pic %s pin %d\n",
                   1034:                    __func__, source->is_type, pic->pic_name, pin);
1.61      ad       1035:                /* NOTREACHED */
1.1       fvdl     1036:        }
                   1037:
1.117     maxv     1038:        /*
1.89      knakahar 1039:         * If the interrupt being established shares an IRQ, it uses the
                    1040:         * existing "ci->ci_isources[slot]" rather than the intrsource allocated
                    1041:         * by the establishing device's pci_intr_alloc() or by this function.
                   1042:         */
                   1043:        if (source->is_handlers != NULL) {
1.91      hannken  1044:                struct intrsource *isp, *nisp;
1.89      knakahar 1045:
1.91      hannken  1046:                SIMPLEQ_FOREACH_SAFE(isp, &io_interrupt_sources,
                   1047:                    is_list, nisp) {
1.89      knakahar 1048:                        if (strncmp(intrstr, isp->is_intrid, INTRIDBUF - 1) == 0
                   1049:                            && isp->is_handlers == NULL)
                   1050:                                intr_free_io_intrsource_direct(isp);
                   1051:                }
                   1052:        }
                   1053:
1.61      ad       1054:        /*
                   1055:         * We're now committed.  Mask the interrupt in hardware and
                   1056:         * count it for load distribution.
                   1057:         */
                   1058:        (*pic->pic_hwmask)(pic, pin);
                   1059:        (ci->ci_nintrhand)++;
1.1       fvdl     1060:
                   1061:        /*
                   1062:         * Figure out where to put the handler.
                   1063:         * This is O(N^2), but we want to preserve the order, and N is
                   1064:         * generally small.
                   1065:         */
                   1066:        for (p = &ci->ci_isources[slot]->is_handlers;
                   1067:             (q = *p) != NULL && q->ih_level > level;
1.61      ad       1068:             p = &q->ih_next) {
                   1069:                /* nothing */;
                   1070:        }
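                                 /*
                                  * At this point *p is the insertion point: the new handler will
                                  * sit after any existing handler with a strictly higher ih_level
                                  * and before handlers of equal or lower level, e.g.
                                  *
                                  *      is_handlers -> [IPL_HIGH] -> [new, IPL_VM] -> [IPL_VM] -> NULL
                                  */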
1.1       fvdl     1071:
1.19      yamt     1072:        ih->ih_fun = ih->ih_realfun = handler;
                   1073:        ih->ih_arg = ih->ih_realarg = arg;
1.61      ad       1074:        ih->ih_prevp = p;
1.1       fvdl     1075:        ih->ih_next = *p;
                   1076:        ih->ih_level = level;
                   1077:        ih->ih_pin = pin;
                   1078:        ih->ih_cpu = ci;
                   1079:        ih->ih_slot = slot;
1.19      yamt     1080: #ifdef MULTIPROCESSOR
                   1081:        if (!mpsafe) {
                   1082:                ih->ih_fun = intr_biglock_wrapper;
                   1083:                ih->ih_arg = ih;
                   1084:        }
                   1085: #endif /* MULTIPROCESSOR */
1.1       fvdl     1086:
1.61      ad       1087:        /*
                   1088:         * Call out to the remote CPU to update its interrupt state.
                   1089:         * Only make RPCs if the APs are up and running.
                   1090:         */
                   1091:        if (ci == curcpu() || !mp_online) {
                   1092:                intr_establish_xcall(ih, (void *)(intptr_t)idt_vec);
                   1093:        } else {
                   1094:                where = xc_unicast(0, intr_establish_xcall, ih,
                   1095:                    (void *)(intptr_t)idt_vec, ci);
                   1096:                xc_wait(where);
1.1       fvdl     1097:        }
                   1098:
1.61      ad       1099:        /* All set up, so add a route for the interrupt and unmask it. */
                   1100:        (*pic->pic_addroute)(pic, ci, pin, idt_vec, type);
                   1101:        (*pic->pic_hwunmask)(pic, pin);
                   1102:        mutex_exit(&cpu_lock);
1.1       fvdl     1103:
1.100     knakahar 1104:        if (bootverbose || cpu_index(ci) != 0)
1.99      knakahar 1105:                aprint_verbose("allocated pic %s type %s pin %d level %d to %s slot %d "
                   1106:                    "idt entry %d\n",
                   1107:                    pic->pic_name, type == IST_EDGE ? "edge" : "level", pin, level,
                   1108:                    device_xname(ci->ci_dev), slot, idt_vec);
1.1       fvdl     1109:
                   1110:        return (ih);
                   1111: }
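                         /*
                          * Usage sketch (illustrative only; "sc", "xyz_intr" and the pic/pin
                          * values are hypothetical driver-side names, not defined in this file):
                          *
                          *      sc->sc_ih = intr_establish_xname(-1, pic, pin, IST_LEVEL, IPL_VM,
                          *          xyz_intr, sc, false, device_xname(sc->sc_dev));
                          *      if (sc->sc_ih == NULL)
                          *              aprint_error_dev(sc->sc_dev, "couldn't establish interrupt\n");
                          */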
                   1112:
1.87      knakahar 1113: void *
                   1114: intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level,
                   1115:               int (*handler)(void *), void *arg, bool known_mpsafe)
                   1116: {
                   1117:
                   1118:        return intr_establish_xname(legacy_irq, pic, pin, type,
                   1119:            level, handler, arg, known_mpsafe, "unknown");
                   1120: }
                   1121:
1.1       fvdl     1122: /*
1.61      ad       1123:  * Called on bound CPU to handle intr_disestablish().
                   1124:  *
                   1125:  * => caller (on initiating CPU) holds cpu_lock on our behalf
                   1126:  * => arg1: struct intrhand *ih
                   1127:  * => arg2: unused
1.1       fvdl     1128:  */
1.61      ad       1129: static void
                   1130: intr_disestablish_xcall(void *arg1, void *arg2)
1.1       fvdl     1131: {
                   1132:        struct intrhand **p, *q;
                   1133:        struct cpu_info *ci;
                   1134:        struct pic *pic;
                   1135:        struct intrsource *source;
1.61      ad       1136:        struct intrhand *ih;
                   1137:        u_long psl;
1.1       fvdl     1138:        int idtvec;
                   1139:
1.61      ad       1140:        ih = arg1;
1.1       fvdl     1141:        ci = ih->ih_cpu;
1.61      ad       1142:
                   1143:        KASSERT(ci == curcpu() || !mp_online);
                   1144:
                   1145:        /* Disable interrupts locally. */
                   1146:        psl = x86_read_psl();
                   1147:        x86_disable_intr();
                   1148:
1.1       fvdl     1149:        pic = ci->ci_isources[ih->ih_slot]->is_pic;
                   1150:        source = ci->ci_isources[ih->ih_slot];
                   1151:        idtvec = source->is_idtvec;
                   1152:
1.117     maxv     1153:        (*pic->pic_hwmask)(pic, ih->ih_pin);
1.35      ad       1154:        atomic_and_32(&ci->ci_ipending, ~(1 << ih->ih_slot));
1.1       fvdl     1155:
                   1156:        /*
                   1157:         * Remove the handler from the chain.
                   1158:         */
                   1159:        for (p = &source->is_handlers; (q = *p) != NULL && q != ih;
                   1160:             p = &q->ih_next)
                   1161:                ;
                   1162:        if (q == NULL) {
1.61      ad       1163:                x86_write_psl(psl);
1.70      tsutsui  1164:                panic("%s: handler not registered", __func__);
1.61      ad       1165:                /* NOTREACHED */
1.1       fvdl     1166:        }
                   1167:
                   1168:        *p = q->ih_next;
                   1169:
                   1170:        intr_calculatemasks(ci);
1.86      msaitoh  1171:        /*
                    1172:         * If no handler remains, 1) delete the route, since the source is
                    1173:         * no longer in use, and 2) do not hw-unmask, to prevent spurious
                    1174:         * interrupts.
                    1175:         *
                    1176:         * If handlers remain, 1) keep the route, since the source is still
                    1177:         * in use, and 2) hw-unmask so interrupts can be received again.
                   1178:         */
                   1179:        if (source->is_handlers == NULL)
                   1180:                (*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec, source->is_type);
                   1181:        else
                   1182:                (*pic->pic_hwunmask)(pic, ih->ih_pin);
1.61      ad       1183:
                   1184:        /* Re-enable interrupts. */
                   1185:        x86_write_psl(psl);
                   1186:
                   1187:        /* If the source is free we can drop it now. */
                   1188:        intr_source_free(ci, ih->ih_slot, pic, idtvec);
1.1       fvdl     1189:
1.98      knakahar 1190:        DPRINTF(("%s: remove slot %d (pic %s pin %d vec %d)\n",
1.56      drochner 1191:            device_xname(ci->ci_dev), ih->ih_slot, pic->pic_name,
1.98      knakahar 1192:            ih->ih_pin, idtvec));
1.61      ad       1193: }
1.1       fvdl     1194:
1.79      knakahar 1195: static int
                   1196: intr_num_handlers(struct intrsource *isp)
                   1197: {
                   1198:        struct intrhand *ih;
                   1199:        int num;
                   1200:
                   1201:        num = 0;
                   1202:        for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next)
                   1203:                num++;
                   1204:
                   1205:        return num;
                   1206: }
                   1207:
1.106     cherry   1208: #else /* XEN */
                   1209: void *
1.109     riastrad 1210: intr_establish(int legacy_irq, struct pic *pic, int pin,
1.111     riastrad 1211:     int type, int level, int (*handler)(void *), void *arg,
1.109     riastrad 1212:     bool known_mpsafe)
1.106     cherry   1213: {
1.109     riastrad 1214:
                   1215:        return intr_establish_xname(legacy_irq, pic, pin, type, level,
                   1216:            handler, arg, known_mpsafe, "XEN");
1.106     cherry   1217: }
                   1218:
                   1219: void *
1.109     riastrad 1220: intr_establish_xname(int legacy_irq, struct pic *pic, int pin,
1.111     riastrad 1221:     int type, int level, int (*handler)(void *), void *arg,
1.109     riastrad 1222:     bool known_mpsafe, const char *xname)
1.106     cherry   1223: {
1.126     jdolecek 1224:        const char *intrstr;
                   1225:        char intrstr_buf[INTRIDBUF];
                   1226:
1.106     cherry   1227:        if (pic->pic_type == PIC_XEN) {
                   1228:                struct intrhand *rih;
1.109     riastrad 1229:
1.112     riastrad 1230:                /*
                   1231:                 * event_set_handler interprets `level != IPL_VM' to
                   1232:                 * mean MP-safe, so we require the caller to match that
                   1233:                 * for the moment.
                   1234:                 */
                   1235:                KASSERT(known_mpsafe == (level != IPL_VM));
                   1236:
1.126     jdolecek 1237:                intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
                   1238:                    sizeof(intrstr_buf));
                   1239:
                   1240:                event_set_handler(pin, handler, arg, level, intrstr, xname);
1.106     cherry   1241:
1.111     riastrad 1242:                rih = kmem_zalloc(sizeof(*rih), cold ? KM_NOSLEEP : KM_SLEEP);
1.106     cherry   1243:                if (rih == NULL) {
                   1244:                        printf("%s: can't allocate handler info\n", __func__);
                   1245:                        return NULL;
                   1246:                }
                   1247:
                   1248:                /*
                   1249:                 * XXX:
                   1250:                 * This is just a copy for API conformance.
                   1251:                 * The real ih is lost in the innards of
                    1252:                 * event_set_handler(), where the details of
                    1253:                 * biglock_wrapper etc. are taken care of.
                    1254:                 * All that goes away when we nuke event_set_handler()
                    1255:                 * et al. and unify with x86/intr.c.
                   1256:                 */
                   1257:                rih->ih_pin = pin; /* port */
1.119     bouyer   1258:                rih->ih_fun = rih->ih_realfun = handler;
                   1259:                rih->ih_arg = rih->ih_realarg = arg;
1.106     cherry   1260:                rih->pic_type = pic->pic_type;
                   1261:                return rih;
                   1262:        }       /* Else we assume pintr */
                   1263:
                   1264: #if NPCI > 0 || NISA > 0
                   1265:        struct pintrhand *pih;
1.113     bouyer   1266:        intr_handle_t irq;
1.106     cherry   1267:        int evtchn;
                   1268:
1.128     cherry   1269:        KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS),
1.107     riastrad 1270:            "bad legacy IRQ value: %d", legacy_irq);
                   1271:        KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic),
                    1272:            "non-legacy IRQ on i8259");
                   1273:
1.113     bouyer   1274:        if (pic->pic_type != PIC_I8259) {
1.106     cherry   1275: #if NIOAPIC > 0
1.128     cherry   1276:                /* Are we passing MP transmogrified/cascaded irqs? */
                   1277:                irq = (legacy_irq == -1) ? 0 : legacy_irq;
                   1278:
1.106     cherry   1279:                /* will do interrupts via I/O APIC */
1.128     cherry   1280:                irq |= APIC_INT_VIA_APIC;
1.113     bouyer   1281:                irq |= pic->pic_apicid << APIC_INT_APIC_SHIFT;
                   1282:                irq |= pin << APIC_INT_PIN_SHIFT;
1.106     cherry   1283: #else /* NIOAPIC */
                   1284:                return NULL;
                   1285: #endif /* NIOAPIC */
1.111     riastrad 1286:        } else {
1.113     bouyer   1287:                irq = legacy_irq;
1.111     riastrad 1288:        }
1.106     cherry   1289:
1.126     jdolecek 1290:        intrstr = intr_create_intrid(irq, pic, pin, intrstr_buf,
                   1291:            sizeof(intrstr_buf));
                   1292:
1.113     bouyer   1293:        evtchn = xen_pirq_alloc(&irq, type);
1.130   ! cherry   1294:        irq = (legacy_irq == -1) ? irq : legacy_irq; /* ISA compat */
1.113     bouyer   1295:        pih = pirq_establish(irq & 0xff, evtchn, handler, arg, level,
1.126     jdolecek 1296:            intrstr, xname);
1.106     cherry   1297:        pih->pic_type = pic->pic_type;
                   1298:        return pih;
                   1299: #endif /* NPCI > 0 || NISA > 0 */
                   1300:
                   1301:        /* FALLTHROUGH */
                   1302:        return NULL;
                   1303: }
                   1304:
                   1305: #endif /* XEN */
                   1306:
1.61      ad       1307: /*
                   1308:  * Deregister an interrupt handler.
                   1309:  */
                   1310: void
                   1311: intr_disestablish(struct intrhand *ih)
                   1312: {
1.106     cherry   1313: #if !defined(XEN)
1.61      ad       1314:        struct cpu_info *ci;
1.79      knakahar 1315:        struct intrsource *isp;
1.61      ad       1316:        uint64_t where;
1.1       fvdl     1317:
1.61      ad       1318:        /*
                   1319:         * Count the removal for load balancing.
                   1320:         * Call out to the remote CPU to update its interrupt state.
                   1321:         * Only make RPCs if the APs are up and running.
                   1322:         */
                   1323:        mutex_enter(&cpu_lock);
                   1324:        ci = ih->ih_cpu;
                   1325:        (ci->ci_nintrhand)--;
                   1326:        KASSERT(ci->ci_nintrhand >= 0);
1.79      knakahar 1327:        isp = ci->ci_isources[ih->ih_slot];
1.61      ad       1328:        if (ci == curcpu() || !mp_online) {
                   1329:                intr_disestablish_xcall(ih, NULL);
                   1330:        } else {
                   1331:                where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci);
                   1332:                xc_wait(where);
1.111     riastrad 1333:        }
1.81      knakahar 1334:        if (!msipic_is_msi_pic(isp->is_pic) && intr_num_handlers(isp) < 1) {
1.79      knakahar 1335:                intr_free_io_intrsource_direct(isp);
                   1336:        }
1.61      ad       1337:        mutex_exit(&cpu_lock);
                   1338:        kmem_free(ih, sizeof(*ih));
1.106     cherry   1339: #else /* XEN */
                   1340:        if (ih->pic_type == PIC_XEN) {
1.111     riastrad 1341:                event_remove_handler(ih->ih_pin, ih->ih_realfun,
                   1342:                    ih->ih_realarg);
1.110     riastrad 1343:                kmem_free(ih, sizeof(*ih));
1.106     cherry   1344:                return;
                   1345:        }
                   1346: #if defined(DOM0OPS)
                   1347:        pirq_disestablish((struct pintrhand *)ih);
                   1348: #endif
                   1349:        return;
                   1350: #endif /* XEN */
                   1351: }
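                         /*
                          * Usage sketch (illustrative only; "sc->sc_ih" is a hypothetical softc
                          * field holding the handle returned by intr_establish_xname() above):
                          *
                          *      if (sc->sc_ih != NULL) {
                          *              intr_disestablish(sc->sc_ih);
                          *              sc->sc_ih = NULL;
                          *      }
                          */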
                   1352:
                   1353: #if defined(XEN) /* nuke conditional post integration */
                   1354: static const char *
                   1355: xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
                   1356: {
                   1357:        KASSERT(pic->pic_type == PIC_XEN);
                   1358:
                   1359:        KASSERT(port >= 0);
                   1360:        KASSERT(port < NR_EVENT_CHANNELS);
                   1361:
                   1362:        snprintf(buf, len, "%s channel %d", pic->pic_name, port);
                   1363:
                   1364:        return buf;
1.6       fvdl     1365: }
1.126     jdolecek 1366: #endif /* XEN */
1.6       fvdl     1367:
1.79      knakahar 1368: static const char *
                   1369: legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
                   1370: {
                   1371:        int legacy_irq;
                   1372:
                   1373:        KASSERT(pic->pic_type == PIC_I8259);
1.106     cherry   1374: #if NLAPIC > 0
1.79      knakahar 1375:        KASSERT(APIC_IRQ_ISLEGACY(ih));
                   1376:
                   1377:        legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
1.106     cherry   1378: #else
                   1379:        legacy_irq = ih;
                   1380: #endif
1.79      knakahar 1381:        KASSERT(legacy_irq >= 0 && legacy_irq < 16);
                   1382:
                   1383:        snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
                   1384:
                   1385:        return buf;
                   1386: }
                   1387:
1.6       fvdl     1388: const char *
1.80      knakahar 1389: intr_string(intr_handle_t ih, char *buf, size_t len)
1.6       fvdl     1390: {
                   1391: #if NIOAPIC > 0
1.56      drochner 1392:        struct ioapic_softc *pic;
1.6       fvdl     1393: #endif
                   1394:
                   1395:        if (ih == 0)
1.80      knakahar 1396:                panic("%s: bogus handle 0x%" PRIx64, __func__, ih);
1.6       fvdl     1397:
                   1398: #if NIOAPIC > 0
                   1399:        if (ih & APIC_INT_VIA_APIC) {
1.56      drochner 1400:                pic = ioapic_find(APIC_IRQ_APIC(ih));
1.6       fvdl     1401:                if (pic != NULL) {
1.76      christos 1402:                        snprintf(buf, len, "%s pin %d",
1.57      drochner 1403:                            device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
1.6       fvdl     1404:                } else {
1.76      christos 1405:                        snprintf(buf, len,
1.63      cegger   1406:                            "apic %d int %d (irq %d)",
1.6       fvdl     1407:                            APIC_IRQ_APIC(ih),
                   1408:                            APIC_IRQ_PIN(ih),
1.80      knakahar 1409:                            APIC_IRQ_LEGACY_IRQ(ih));
1.6       fvdl     1410:                }
                   1411:        } else
1.80      knakahar 1412:                snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
1.6       fvdl     1413:
1.106     cherry   1414: #elif NLAPIC > 0
1.80      knakahar 1415:        snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
1.106     cherry   1416: #else
                   1417:        snprintf(buf, len, "irq %d", (int) ih);
1.6       fvdl     1418: #endif
1.76      christos 1419:        return buf;
1.6       fvdl     1420:
1.1       fvdl     1421: }
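                         /*
                          * Usage sketch (illustrative only; the buffer size is an arbitrary
                          * choice):
                          *
                          *      char buf[64];
                          *      aprint_normal("interrupting at %s\n",
                          *          intr_string(ih, buf, sizeof(buf)));
                          */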
                   1422:
                   1423: /*
                   1424:  * Fake interrupt handler structures for the benefit of symmetry with
                   1425:  * other interrupt sources, and the benefit of intr_calculatemasks()
                   1426:  */
                   1427: struct intrhand fake_softclock_intrhand;
                   1428: struct intrhand fake_softnet_intrhand;
                   1429: struct intrhand fake_softserial_intrhand;
1.36      ad       1430: struct intrhand fake_softbio_intrhand;
1.1       fvdl     1431: struct intrhand fake_timer_intrhand;
                   1432: struct intrhand fake_ipi_intrhand;
1.45      ad       1433: struct intrhand fake_preempt_intrhand;
1.1       fvdl     1434:
                   1435: #if NLAPIC > 0 && defined(MULTIPROCESSOR)
1.21      christos 1436: static const char *x86_ipi_names[X86_NIPI] = X86_IPI_NAMES;
1.1       fvdl     1437: #endif
                   1438:
1.122     roy      1439: #if defined(INTRSTACKSIZE)
1.41      dyoung   1440: static inline bool
                   1441: redzone_const_or_false(bool x)
1.30      dyoung   1442: {
1.32      ad       1443: #ifdef DIAGNOSTIC
                   1444:        return x;
                   1445: #else
1.41      dyoung   1446:        return false;
1.32      ad       1447: #endif /* !DIAGNOSTIC */
1.30      dyoung   1448: }
                   1449:
1.41      dyoung   1450: static inline int
                   1451: redzone_const_or_zero(int x)
                   1452: {
                   1453:        return redzone_const_or_false(true) ? x : 0;
                   1454: }
1.120     roy      1455: #endif
1.41      dyoung   1456:
1.1       fvdl     1457: /*
                   1458:  * Initialize all handlers that aren't dynamically allocated, and exist
                   1459:  * for each CPU.
                   1460:  */
                   1461: void
                   1462: cpu_intr_init(struct cpu_info *ci)
                   1463: {
1.106     cherry   1464: #if !defined(XEN)
1.1       fvdl     1465:        struct intrsource *isp;
1.129     mrg      1466: #if NLAPIC > 0
                   1467:        static int first = 1;
                   1468: #if defined(MULTIPROCESSOR)
1.1       fvdl     1469:        int i;
1.129     mrg      1470: #endif
1.1       fvdl     1471: #endif
                   1472:
                   1473: #if NLAPIC > 0
1.61      ad       1474:        isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1.1       fvdl     1475:        isp->is_recurse = Xrecurse_lapic_ltimer;
                   1476:        isp->is_resume = Xresume_lapic_ltimer;
                   1477:        fake_timer_intrhand.ih_level = IPL_CLOCK;
                   1478:        isp->is_handlers = &fake_timer_intrhand;
                   1479:        isp->is_pic = &local_pic;
                   1480:        ci->ci_isources[LIR_TIMER] = isp;
1.68      mrg      1481:        evcnt_attach_dynamic(&isp->is_evcnt,
                   1482:            first ? EVCNT_TYPE_INTR : EVCNT_TYPE_MISC, NULL,
1.43      cegger   1483:            device_xname(ci->ci_dev), "timer");
1.68      mrg      1484:        first = 0;
1.1       fvdl     1485:
                   1486: #ifdef MULTIPROCESSOR
1.61      ad       1487:        isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1.1       fvdl     1488:        isp->is_recurse = Xrecurse_lapic_ipi;
                   1489:        isp->is_resume = Xresume_lapic_ipi;
1.66      rmind    1490:        fake_ipi_intrhand.ih_level = IPL_HIGH;
1.1       fvdl     1491:        isp->is_handlers = &fake_ipi_intrhand;
                   1492:        isp->is_pic = &local_pic;
                   1493:        ci->ci_isources[LIR_IPI] = isp;
                   1494:
                   1495:        for (i = 0; i < X86_NIPI; i++)
1.46      ad       1496:                evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
1.43      cegger   1497:                    NULL, device_xname(ci->ci_dev), x86_ipi_names[i]);
1.1       fvdl     1498: #endif
                   1499: #endif
                   1500:
1.106     cherry   1501: #if defined(__HAVE_PREEMPTION)
1.61      ad       1502:        isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1.125     christos 1503:        isp->is_recurse = Xrecurse_preempt;
                   1504:        isp->is_resume = Xresume_preempt;
1.45      ad       1505:        fake_preempt_intrhand.ih_level = IPL_PREEMPT;
                   1506:        isp->is_handlers = &fake_preempt_intrhand;
                   1507:        isp->is_pic = &softintr_pic;
                   1508:        ci->ci_isources[SIR_PREEMPT] = isp;
                   1509:
1.106     cherry   1510: #endif
1.1       fvdl     1511:        intr_calculatemasks(ci);
                   1512:
1.106     cherry   1513: #else /* XEN */
                   1514:        int i; /* XXX: duplicate */
1.117     maxv     1515:        ci->ci_iunmask[0] = 0xfffffffe;
1.106     cherry   1516:        for (i = 1; i < NIPL; i++)
                   1517:                ci->ci_iunmask[i] = ci->ci_iunmask[i - 1] & ~(1 << i);
                   1518: #endif /* XEN */
                   1519:
1.25      yamt     1520: #if defined(INTRSTACKSIZE)
1.117     maxv     1521:        vaddr_t istack;
                   1522:
1.41      dyoung   1523:        /*
                   1524:         * If the red zone is activated, protect both the top and
                   1525:         * the bottom of the stack with an unmapped page.
                   1526:         */
                   1527:        istack = uvm_km_alloc(kernel_map,
                   1528:            INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
1.117     maxv     1529:            UVM_KMF_WIRED|UVM_KMF_ZERO);
1.41      dyoung   1530:        if (redzone_const_or_false(true)) {
                   1531:                pmap_kremove(istack, PAGE_SIZE);
                   1532:                pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
                   1533:                pmap_update(pmap_kernel());
                   1534:        }
1.117     maxv     1535:
                   1536:        /*
                   1537:         * 33 used to be 1.  Arbitrarily reserve 32 more register_t's
1.41      dyoung   1538:         * of space for ddb(4) to examine some subroutine arguments
                   1539:         * and to hunt for the next stack frame.
                   1540:         */
                   1541:        ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
                   1542:            INTRSTACKSIZE - 33 * sizeof(register_t);
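                                 /*
                                  * Resulting layout with the red zone enabled (sketch):
                                  *
                                  *      [ guard page | INTRSTACKSIZE usable stack | guard page ]
                                  *      ^istack                          ^ci_intrstack
                                  *
                                  * ci_intrstack points 33 register_t below the top guard page.
                                  */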
1.117     maxv     1543: #endif
                   1544:
1.25      yamt     1545:        ci->ci_idepth = -1;
1.1       fvdl     1546: }
                   1547:
1.61      ad       1548: #if defined(INTRDEBUG) || defined(DDB)
1.69      cegger   1549:
1.1       fvdl     1550: void
                   1551: intr_printconfig(void)
                   1552: {
                   1553:        int i;
                   1554:        struct intrhand *ih;
                   1555:        struct intrsource *isp;
                   1556:        struct cpu_info *ci;
                   1557:        CPU_INFO_ITERATOR cii;
1.75      chs      1558:        void (*pr)(const char *, ...);
                   1559:
1.105     joerg    1560:        pr = printf;
1.75      chs      1561: #ifdef DDB
                   1562:        extern int db_active;
                   1563:        if (db_active) {
                   1564:                pr = db_printf;
                   1565:        }
                   1566: #endif
1.1       fvdl     1567:
                   1568:        for (CPU_INFO_FOREACH(cii, ci)) {
1.75      chs      1569:                (*pr)("%s: interrupt masks:\n", device_xname(ci->ci_dev));
1.1       fvdl     1570:                for (i = 0; i < NIPL; i++)
1.77      ozaki-r  1571:                        (*pr)("IPL %d mask %08lx unmask %08lx\n", i,
1.1       fvdl     1572:                            (u_long)ci->ci_imask[i], (u_long)ci->ci_iunmask[i]);
                   1573:                for (i = 0; i < MAX_INTR_SOURCES; i++) {
                   1574:                        isp = ci->ci_isources[i];
                   1575:                        if (isp == NULL)
                   1576:                                continue;
1.75      chs      1577:                        (*pr)("%s source %d is pin %d from pic %s type %d maxlevel %d\n",
1.51      ad       1578:                            device_xname(ci->ci_dev), i, isp->is_pin,
1.75      chs      1579:                            isp->is_pic->pic_name, isp->is_type, isp->is_maxlevel);
1.1       fvdl     1580:                        for (ih = isp->is_handlers; ih != NULL;
                   1581:                             ih = ih->ih_next)
1.75      chs      1582:                                (*pr)("\thandler %p level %d\n",
1.1       fvdl     1583:                                    ih->ih_fun, ih->ih_level);
1.75      chs      1584: #if NIOAPIC > 0
                   1585:                        if (isp->is_pic->pic_type == PIC_IOAPIC) {
                   1586:                                struct ioapic_softc *sc;
                   1587:                                sc = isp->is_pic->pic_ioapic;
                   1588:                                (*pr)("\tioapic redir 0x%x\n",
                   1589:                                    sc->sc_pins[isp->is_pin].ip_map->redir);
                   1590:                        }
                   1591: #endif
1.1       fvdl     1592:
                   1593:                }
                   1594:        }
                   1595: }
1.75      chs      1596:
1.1       fvdl     1597: #endif
1.36      ad       1598:
1.106     cherry   1599: #if defined(__HAVE_FAST_SOFTINTS)
1.36      ad       1600: void
                   1601: softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
                   1602: {
                   1603:        struct intrsource *isp;
                   1604:        struct cpu_info *ci;
                   1605:        u_int sir;
                   1606:
                   1607:        ci = l->l_cpu;
                   1608:
1.61      ad       1609:        isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1.36      ad       1610:        isp->is_recurse = Xsoftintr;
                   1611:        isp->is_resume = Xsoftintr;
                   1612:        isp->is_pic = &softintr_pic;
                   1613:
                   1614:        switch (level) {
                   1615:        case SOFTINT_BIO:
                   1616:                sir = SIR_BIO;
                   1617:                fake_softbio_intrhand.ih_level = IPL_SOFTBIO;
                   1618:                isp->is_handlers = &fake_softbio_intrhand;
                   1619:                break;
                   1620:        case SOFTINT_NET:
                   1621:                sir = SIR_NET;
                   1622:                fake_softnet_intrhand.ih_level = IPL_SOFTNET;
                   1623:                isp->is_handlers = &fake_softnet_intrhand;
                   1624:                break;
                   1625:        case SOFTINT_SERIAL:
                   1626:                sir = SIR_SERIAL;
                   1627:                fake_softserial_intrhand.ih_level = IPL_SOFTSERIAL;
                   1628:                isp->is_handlers = &fake_softserial_intrhand;
                   1629:                break;
                   1630:        case SOFTINT_CLOCK:
                   1631:                sir = SIR_CLOCK;
                   1632:                fake_softclock_intrhand.ih_level = IPL_SOFTCLOCK;
                   1633:                isp->is_handlers = &fake_softclock_intrhand;
                   1634:                break;
                   1635:        default:
                   1636:                panic("softint_init_md");
                   1637:        }
                   1638:
                   1639:        KASSERT(ci->ci_isources[sir] == NULL);
                   1640:
                   1641:        *machdep = (1 << sir);
                   1642:        ci->ci_isources[sir] = isp;
                   1643:        ci->ci_isources[sir]->is_lwp = l;
                   1644:
                   1645:        intr_calculatemasks(ci);
                   1646: }
1.106     cherry   1647: #endif /* __HAVE_FAST_SOFTINTS */
1.79      knakahar 1648: /*
                    1649:  * Save the interrupt count of the currently affinitized CPU.
                   1650:  */
                   1651: static void
                   1652: intr_save_evcnt(struct intrsource *source, cpuid_t cpuid)
                   1653: {
                   1654:        struct percpu_evcnt *pep;
                   1655:        uint64_t curcnt;
                   1656:        int i;
                   1657:
                   1658:        curcnt = source->is_evcnt.ev_count;
                   1659:        pep = source->is_saved_evcnt;
                   1660:
                   1661:        for (i = 0; i < ncpu; i++) {
                   1662:                if (pep[i].cpuid == cpuid) {
                   1663:                        pep[i].count = curcnt;
                   1664:                        break;
                   1665:                }
                   1666:        }
                   1667: }
                   1668:
                   1669: /*
                    1670:  * Restore the interrupt count of the currently affinitized CPU.
                   1671:  */
                   1672: static void
                   1673: intr_restore_evcnt(struct intrsource *source, cpuid_t cpuid)
                   1674: {
                   1675:        struct percpu_evcnt *pep;
                   1676:        int i;
                   1677:
                   1678:        pep = source->is_saved_evcnt;
                   1679:
                   1680:        for (i = 0; i < ncpu; i++) {
                   1681:                if (pep[i].cpuid == cpuid) {
                   1682:                        source->is_evcnt.ev_count = pep[i].count;
                   1683:                        break;
                   1684:                }
                   1685:        }
                   1686: }
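                         /*
                          * These two helpers are used in pairs around a migration: the outgoing
                          * CPU's count is saved and the destination CPU's previously saved count
                          * is loaded back, e.g. as done in intr_redistribute():
                          *
                          *      intr_save_evcnt(isp, oci->ci_cpuid);
                          *      intr_restore_evcnt(isp, nci->ci_cpuid);
                          */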
                   1687:
1.61      ad       1688: static void
                   1689: intr_redistribute_xc_t(void *arg1, void *arg2)
                   1690: {
                   1691:        struct cpu_info *ci;
                   1692:        struct intrsource *isp;
                   1693:        int slot;
                   1694:        u_long psl;
                   1695:
                   1696:        ci = curcpu();
                   1697:        isp = arg1;
                   1698:        slot = (int)(intptr_t)arg2;
                   1699:
                   1700:        /* Disable interrupts locally. */
                   1701:        psl = x86_read_psl();
                   1702:        x86_disable_intr();
                   1703:
                   1704:        /* Hook it in and re-calculate masks. */
                   1705:        ci->ci_isources[slot] = isp;
                   1706:        intr_calculatemasks(curcpu());
                   1707:
                   1708:        /* Re-enable interrupts locally. */
                   1709:        x86_write_psl(psl);
                   1710: }
                   1711:
                   1712: static void
                   1713: intr_redistribute_xc_s1(void *arg1, void *arg2)
                   1714: {
                   1715:        struct pic *pic;
                   1716:        struct intrsource *isp;
                   1717:        struct cpu_info *nci;
                   1718:        u_long psl;
                   1719:
                   1720:        isp = arg1;
                   1721:        nci = arg2;
                   1722:
                   1723:        /*
                   1724:         * Disable interrupts on-chip and mask the pin.  Back out
                   1725:         * and let the interrupt be processed if one is pending.
                   1726:         */
                   1727:        pic = isp->is_pic;
                   1728:        for (;;) {
                   1729:                psl = x86_read_psl();
                   1730:                x86_disable_intr();
                   1731:                if ((*pic->pic_trymask)(pic, isp->is_pin)) {
                   1732:                        break;
                   1733:                }
                   1734:                x86_write_psl(psl);
                   1735:                DELAY(1000);
                   1736:        }
                   1737:
                   1738:        /* pic_addroute will unmask the interrupt. */
                   1739:        (*pic->pic_addroute)(pic, nci, isp->is_pin, isp->is_idtvec,
                   1740:            isp->is_type);
                   1741:        x86_write_psl(psl);
                   1742: }
                   1743:
                   1744: static void
                   1745: intr_redistribute_xc_s2(void *arg1, void *arg2)
                   1746: {
                   1747:        struct cpu_info *ci;
                   1748:        u_long psl;
                   1749:        int slot;
                   1750:
                   1751:        ci = curcpu();
                   1752:        slot = (int)(uintptr_t)arg1;
                   1753:
                   1754:        /* Disable interrupts locally. */
                   1755:        psl = x86_read_psl();
                   1756:        x86_disable_intr();
                   1757:
                   1758:        /* Patch out the source and re-calculate masks. */
                   1759:        ci->ci_isources[slot] = NULL;
                   1760:        intr_calculatemasks(ci);
                   1761:
                   1762:        /* Re-enable interrupts locally. */
                   1763:        x86_write_psl(psl);
                   1764: }
                   1765:
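                         /*
                          * intr_redistribute: migrate one I/O APIC interrupt source away from
                          * "oci".  Sketch of the steps below: pick a movable source, choose the
                          * least loaded unshielded CPU with a free slot (falling back to any
                          * unshielded CPU that has one), then run three cross-calls: install the
                          * source on the target CPU, reroute the pin away from the old CPU, and
                          * detach the slot on the old CPU.  Book-keeping and event counters are
                          * updated last.
                          */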
                   1766: static bool
                   1767: intr_redistribute(struct cpu_info *oci)
                   1768: {
                   1769:        struct intrsource *isp;
                   1770:        struct intrhand *ih;
                   1771:        CPU_INFO_ITERATOR cii;
                   1772:        struct cpu_info *nci, *ici;
                   1773:        int oslot, nslot;
                   1774:        uint64_t where;
                   1775:
                   1776:        KASSERT(mutex_owned(&cpu_lock));
                   1777:
                   1778:        /* Look for an interrupt source that we can migrate. */
                   1779:        for (oslot = 0; oslot < MAX_INTR_SOURCES; oslot++) {
                   1780:                if ((isp = oci->ci_isources[oslot]) == NULL) {
                   1781:                        continue;
                   1782:                }
                   1783:                if (isp->is_pic->pic_type == PIC_IOAPIC) {
                   1784:                        break;
                   1785:                }
                   1786:        }
                   1787:        if (oslot == MAX_INTR_SOURCES) {
                   1788:                return false;
                   1789:        }
                   1790:
                   1791:        /* Find least loaded CPU and try to move there. */
                   1792:        nci = NULL;
                   1793:        for (CPU_INFO_FOREACH(cii, ici)) {
                   1794:                if ((ici->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
                   1795:                        continue;
                   1796:                }
                   1797:                KASSERT(ici != oci);
                   1798:                if (nci == NULL || nci->ci_nintrhand > ici->ci_nintrhand) {
                   1799:                        nci = ici;
                   1800:                }
                   1801:        }
                   1802:        if (nci == NULL) {
                   1803:                return false;
                   1804:        }
                   1805:        for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) {
                   1806:                if (nci->ci_isources[nslot] == NULL) {
                   1807:                        break;
                   1808:                }
                   1809:        }
                   1810:
                   1811:        /* If that did not work, allocate anywhere. */
                   1812:        if (nslot == MAX_INTR_SOURCES) {
                   1813:                for (CPU_INFO_FOREACH(cii, nci)) {
                   1814:                        if ((nci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
                   1815:                                continue;
                   1816:                        }
                   1817:                        KASSERT(nci != oci);
                   1818:                        for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) {
                   1819:                                if (nci->ci_isources[nslot] == NULL) {
                   1820:                                        break;
                   1821:                                }
                   1822:                        }
                   1823:                        if (nslot != MAX_INTR_SOURCES) {
                   1824:                                break;
                   1825:                        }
                   1826:                }
                   1827:        }
                   1828:        if (nslot == MAX_INTR_SOURCES) {
                   1829:                return false;
                   1830:        }
                   1831:
                   1832:        /*
                   1833:         * Now we have new CPU and new slot.  Run a cross-call to set up
                   1834:         * the new vector on the target CPU.
                   1835:         */
                   1836:        where = xc_unicast(0, intr_redistribute_xc_t, isp,
                   1837:            (void *)(intptr_t)nslot, nci);
                   1838:        xc_wait(where);
1.117     maxv     1839:
1.61      ad       1840:        /*
                   1841:         * We're ready to go on the target CPU.  Run a cross call to
                   1842:         * reroute the interrupt away from the source CPU.
                   1843:         */
                   1844:        where = xc_unicast(0, intr_redistribute_xc_s1, isp, nci, oci);
                   1845:        xc_wait(where);
                   1846:
                   1847:        /* Sleep for (at least) 10ms to allow the change to take hold. */
                   1848:        (void)kpause("intrdist", false, mstohz(10), NULL);
                   1849:
                   1850:        /* Complete removal from the source CPU. */
                   1851:        where = xc_unicast(0, intr_redistribute_xc_s2,
                   1852:            (void *)(uintptr_t)oslot, NULL, oci);
                   1853:        xc_wait(where);
                   1854:
                   1855:        /* Finally, take care of book-keeping. */
                   1856:        for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) {
                   1857:                oci->ci_nintrhand--;
                   1858:                nci->ci_nintrhand++;
                   1859:                ih->ih_cpu = nci;
                   1860:        }
1.79      knakahar 1861:        intr_save_evcnt(isp, oci->ci_cpuid);
                   1862:        intr_restore_evcnt(isp, nci->ci_cpuid);
                   1863:        isp->is_active_cpu = nci->ci_cpuid;
1.61      ad       1864:
                   1865:        return true;
                   1866: }
                   1867:
                   1868: void
                   1869: cpu_intr_redistribute(void)
                   1870: {
                   1871:        CPU_INFO_ITERATOR cii;
                   1872:        struct cpu_info *ci;
                   1873:
                   1874:        KASSERT(mutex_owned(&cpu_lock));
                   1875:        KASSERT(mp_online);
                   1876:
1.106     cherry   1877: #if defined(XEN) /* XXX: remove */
                   1878:        return;
                   1879: #endif
1.61      ad       1880:        /* Direct interrupts away from shielded CPUs. */
                   1881:        for (CPU_INFO_FOREACH(cii, ci)) {
                   1882:                if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
                   1883:                        continue;
                   1884:                }
                   1885:                while (intr_redistribute(ci)) {
                   1886:                        /* nothing */
                   1887:                }
                   1888:        }
                   1889:
                   1890:        /* XXX should now re-balance */
                   1891: }
                   1892:
                   1893: u_int
                   1894: cpu_intr_count(struct cpu_info *ci)
                   1895: {
                   1896:
                   1897:        KASSERT(ci->ci_nintrhand >= 0);
                   1898:
                   1899:        return ci->ci_nintrhand;
                   1900: }
1.79      knakahar 1901:
1.106     cherry   1902: #if !defined(XEN)
1.79      knakahar 1903: static int
                   1904: intr_find_unused_slot(struct cpu_info *ci, int *index)
                   1905: {
                   1906:        int slot, i;
                   1907:
                   1908:        KASSERT(mutex_owned(&cpu_lock));
                   1909:
                   1910:        slot = -1;
                   1911:        for (i = 0; i < MAX_INTR_SOURCES ; i++) {
                   1912:                if (ci->ci_isources[i] == NULL) {
                   1913:                        slot = i;
                   1914:                        break;
                   1915:                }
                   1916:        }
                   1917:        if (slot == -1) {
                   1918:                DPRINTF(("cannot allocate ci_isources\n"));
                   1919:                return EBUSY;
                   1920:        }
                   1921:
                   1922:        *index = slot;
                   1923:        return 0;
                   1924: }
                   1925:
                   1926: /*
                    1927:  * Prepare the cpu_info to accept the interrupt.
                   1928:  */
                   1929: static void
                   1930: intr_activate_xcall(void *arg1, void *arg2)
                   1931: {
                   1932:        struct cpu_info *ci;
                   1933:        struct intrsource *source;
                   1934:        struct intrstub *stubp;
                   1935:        struct intrhand *ih;
                   1936:        u_long psl;
                   1937:        int idt_vec;
                   1938:        int slot;
                   1939:
                   1940:        ih = arg1;
                   1941:
                   1942:        kpreempt_disable();
                   1943:
                   1944:        KASSERT(ih->ih_cpu == curcpu() || !mp_online);
                   1945:
                   1946:        ci = ih->ih_cpu;
                   1947:        slot = ih->ih_slot;
                   1948:        source = ci->ci_isources[slot];
                   1949:        idt_vec = source->is_idtvec;
                   1950:
                   1951:        psl = x86_read_psl();
                   1952:        x86_disable_intr();
                   1953:
                   1954:        intr_calculatemasks(ci);
                   1955:
                   1956:        if (source->is_type == IST_LEVEL) {
                   1957:                stubp = &source->is_pic->pic_level_stubs[slot];
                   1958:        } else {
                   1959:                stubp = &source->is_pic->pic_edge_stubs[slot];
                   1960:        }
                   1961:        source->is_resume = stubp->ist_resume;
                   1962:        source->is_recurse = stubp->ist_recurse;
1.102     maxv     1963:        idt_vec_set(idt_vec, stubp->ist_entry);
1.79      knakahar 1964:
                   1965:        x86_write_psl(psl);
                   1966:
                   1967:        kpreempt_enable();
                   1968: }
                   1969:
                   1970: /*
                    1971:  * Make the cpu_info stop accepting the interrupt.
                   1972:  */
                   1973: static void
                   1974: intr_deactivate_xcall(void *arg1, void *arg2)
                   1975: {
                   1976:        struct cpu_info *ci;
                   1977:        struct intrhand *ih, *lih;
                   1978:        u_long psl;
                   1979:        int slot;
                   1980:
                   1981:        ih = arg1;
                   1982:
                   1983:        kpreempt_disable();
                   1984:
                   1985:        KASSERT(ih->ih_cpu == curcpu() || !mp_online);
                   1986:
                   1987:        ci = ih->ih_cpu;
                   1988:        slot = ih->ih_slot;
                   1989:
                   1990:        psl = x86_read_psl();
                   1991:        x86_disable_intr();
                   1992:
                    1993:        /* Move all devices sharing the IRQ number. */
                   1994:        ci->ci_isources[slot] = NULL;
                   1995:        for (lih = ih; lih != NULL; lih = lih->ih_next) {
                   1996:                ci->ci_nintrhand--;
                   1997:        }
                   1998:
                   1999:        intr_calculatemasks(ci);
                   2000:
                   2001:        /*
                    2002:         * Skip unsetgate(), because the same idt[] entry is overwritten in
                   2003:         * intr_activate_xcall().
                   2004:         */
                   2005:
                   2006:        x86_write_psl(psl);
                   2007:
                   2008:        kpreempt_enable();
                   2009: }
                   2010:
                   2011: static void
                   2012: intr_get_affinity(struct intrsource *isp, kcpuset_t *cpuset)
                   2013: {
                   2014:        struct cpu_info *ci;
                   2015:
                   2016:        KASSERT(mutex_owned(&cpu_lock));
                   2017:
                   2018:        if (isp == NULL) {
                   2019:                kcpuset_zero(cpuset);
                   2020:                return;
                   2021:        }
                   2022:
1.124     knakahar 2023:        KASSERTMSG(isp->is_handlers != NULL,
                    2024:            "can't get affinity for an interrupt that is not established");
                   2025:
1.79      knakahar 2026:        ci = isp->is_handlers->ih_cpu;
                   2027:        if (ci == NULL) {
                   2028:                kcpuset_zero(cpuset);
                   2029:                return;
                   2030:        }
                   2031:
                   2032:        kcpuset_set(cpuset, cpu_index(ci));
                   2033:        return;
                   2034: }
                   2035:
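                         /*
                          * intr_set_affinity: bind the handlers of "isp" to the lowest-numbered
                          * CPU set in "cpuset".  Sketch of the steps below: validate the target
                          * CPU and pic, find a free slot on the new CPU, hw-mask the pin and wait
                          * for any pending interrupt to drain, deactivate the source on the old
                          * CPU via cross-call and delete the old route, then activate it on the
                          * new CPU.
                          */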
                   2036: static int
                   2037: intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset)
                   2038: {
                   2039:        struct cpu_info *oldci, *newci;
                   2040:        struct intrhand *ih, *lih;
                   2041:        struct pic *pic;
                   2042:        u_int cpu_idx;
                   2043:        int idt_vec;
                   2044:        int oldslot, newslot;
                   2045:        int err;
                   2046:        int pin;
                   2047:
1.95      knakahar 2048:        KASSERT(mutex_owned(&intr_distribute_lock));
1.79      knakahar 2049:        KASSERT(mutex_owned(&cpu_lock));
                   2050:
                   2051:        /* XXX
                    2052:         * Logical destination mode is not supported; use the lowest-indexed cpu.
                   2053:         */
                   2054:        cpu_idx = kcpuset_ffs(cpuset) - 1;
                   2055:        newci = cpu_lookup(cpu_idx);
                   2056:        if (newci == NULL) {
                   2057:                DPRINTF(("invalid cpu index: %u\n", cpu_idx));
                   2058:                return EINVAL;
                   2059:        }
                   2060:        if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
                    2061:                DPRINTF(("cpu %u is shielded from interrupts (SPCF_NOINTR)\n", cpu_idx));
                   2062:                return EINVAL;
                   2063:        }
                   2064:
                   2065:        if (isp == NULL) {
                   2066:                DPRINTF(("invalid intrctl handler\n"));
                   2067:                return EINVAL;
                   2068:        }
                   2069:
                    2070:        /* The i8259 PIC supports only the primary CPU; see i8259.c. */
                   2071:        pic = isp->is_pic;
                   2072:        if (pic == &i8259_pic) {
                   2073:                DPRINTF(("i8259 pic does not support set_affinity\n"));
                   2074:                return ENOTSUP;
                   2075:        }
                   2076:
                   2077:        ih = isp->is_handlers;
1.124     knakahar 2078:        KASSERTMSG(ih != NULL,
                    2079:            "Cannot set affinity for a device with no established handler.");
                   2080:
1.79      knakahar 2081:        oldci = ih->ih_cpu;
                   2082:        if (newci == oldci) /* nothing to do */
                   2083:                return 0;
                   2084:
                   2085:        oldslot = ih->ih_slot;
                   2086:        idt_vec = isp->is_idtvec;
                   2087:
                   2088:        err = intr_find_unused_slot(newci, &newslot);
                   2089:        if (err) {
                   2090:                DPRINTF(("failed to allocate interrupt slot for PIC %s intrid %s\n",
                   2091:                        isp->is_pic->pic_name, isp->is_intrid));
                   2092:                return err;
                   2093:        }
                   2094:
                   2095:        pin = isp->is_pin;
                   2096:        (*pic->pic_hwmask)(pic, pin); /* for ci_ipending check */
1.95      knakahar 2097:        while (oldci->ci_ipending & (1 << oldslot))
                   2098:                (void)kpause("intrdist", false, 1, &cpu_lock);
1.79      knakahar 2099:
                   2100:        kpreempt_disable();
                   2101:
                   2102:        /* deactivate old interrupt setting */
                   2103:        if (oldci == curcpu() || !mp_online) {
                   2104:                intr_deactivate_xcall(ih, NULL);
                   2105:        } else {
                   2106:                uint64_t where;
                   2107:                where = xc_unicast(0, intr_deactivate_xcall, ih,
                   2108:                                   NULL, oldci);
                   2109:                xc_wait(where);
                   2110:        }
                   2111:        intr_save_evcnt(isp, oldci->ci_cpuid);
                   2112:        (*pic->pic_delroute)(pic, oldci, pin, idt_vec, isp->is_type);
                   2113:
                   2114:        /* activate new interrupt setting */
                   2115:        newci->ci_isources[newslot] = isp;
                   2116:        for (lih = ih; lih != NULL; lih = lih->ih_next) {
                   2117:                newci->ci_nintrhand++;
                   2118:                lih->ih_cpu = newci;
                   2119:                lih->ih_slot = newslot;
                   2120:        }
                   2121:        if (newci == curcpu() || !mp_online) {
                   2122:                intr_activate_xcall(ih, NULL);
                   2123:        } else {
                   2124:                uint64_t where;
                   2125:                where = xc_unicast(0, intr_activate_xcall, ih,
                   2126:                                   NULL, newci);
                   2127:                xc_wait(where);
                   2128:        }
                   2129:        intr_restore_evcnt(isp, newci->ci_cpuid);
                   2130:        isp->is_active_cpu = newci->ci_cpuid;
                   2131:        (*pic->pic_addroute)(pic, newci, pin, idt_vec, isp->is_type);
                   2132:
                   2133:        kpreempt_enable();
                   2134:
                   2135:        (*pic->pic_hwunmask)(pic, pin);
                   2136:
                   2137:        return err;
                   2138: }
1.126     jdolecek 2139:
1.87      knakahar 2140: static bool
                   2141: intr_is_affinity_intrsource(struct intrsource *isp, const kcpuset_t *cpuset)
                   2142: {
                   2143:        struct cpu_info *ci;
                   2144:
                   2145:        KASSERT(mutex_owned(&cpu_lock));
                   2146:
1.124     knakahar 2147:        /*
                    2148:         * The interrupt has already been allocated (e.g. by
                    2149:         * pci_intr_alloc()), but no handler has been established yet.
                   2150:         */
                   2151:        if (isp->is_handlers == NULL)
                   2152:                return false;
                   2153:
1.87      knakahar 2154:        ci = isp->is_handlers->ih_cpu;
                   2155:        KASSERT(ci != NULL);
                   2156:
                   2157:        return kcpuset_isset(cpuset, cpu_index(ci));
                   2158: }
                   2159:
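/*
 * Return the handler chain of the I/O interrupt source named "intrid", or
 * NULL if no such source exists.  Called with cpu_lock held.
 */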
                   2160: static struct intrhand *
                   2161: intr_get_handler(const char *intrid)
                   2162: {
                   2163:        struct intrsource *isp;
                   2164:
                   2165:        KASSERT(mutex_owned(&cpu_lock));
                   2166:
                   2167:        isp = intr_get_io_intrsource(intrid);
                   2168:        if (isp == NULL)
                   2169:                return NULL;
                   2170:
                   2171:        return isp->is_handlers;
                   2172: }
                   2173:
                   2174: /*
                   2175:  * MI interface for subr_interrupt.c
                   2176:  */
                   2177: uint64_t
                   2178: interrupt_get_count(const char *intrid, u_int cpu_idx)
1.79      knakahar 2179: {
1.87      knakahar 2180:        struct cpu_info *ci;
1.79      knakahar 2181:        struct intrsource *isp;
1.87      knakahar 2182:        struct intrhand *ih;
                   2183:        struct percpu_evcnt pep;
                   2184:        cpuid_t cpuid;
                   2185:        int i, slot;
                   2186:        uint64_t count = 0;
1.79      knakahar 2187:
1.87      knakahar 2188:        ci = cpu_lookup(cpu_idx);
                   2189:        cpuid = ci->ci_cpuid;
                   2190:
                   2191:        mutex_enter(&cpu_lock);
                   2192:
                   2193:        ih = intr_get_handler(intrid);
                   2194:        if (ih == NULL) {
                   2195:                count = 0;
                   2196:                goto out;
                   2197:        }
                   2198:        slot = ih->ih_slot;
                   2199:        isp = ih->ih_cpu->ci_isources[slot];
                   2200:
                   2201:        for (i = 0; i < ncpu; i++) {
                   2202:                pep = isp->is_saved_evcnt[i];
                   2203:                if (cpuid == pep.cpuid) {
                   2204:                        if (isp->is_active_cpu == pep.cpuid) {
                   2205:                                count = isp->is_evcnt.ev_count;
                   2206:                                goto out;
                   2207:                        } else {
                   2208:                                count = pep.count;
                   2209:                                goto out;
                   2210:                        }
                   2211:                }
                   2212:        }
                   2213:
                   2214:  out:
                   2215:        mutex_exit(&cpu_lock);
                   2216:        return count;
                   2217: }
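
/*
 * Illustrative sketch (the example_* name is hypothetical): a caller could
 * total an interrupt's count across all CPUs with the MI interface above.
 */
#if 0
static uint64_t
example_total_count(const char *intrid)
{
        uint64_t total = 0;
        int i;

        for (i = 0; i < ncpu; i++)
                total += interrupt_get_count(intrid, (u_int)i);
        return total;
}
#endif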
                   2218:
                   2219: /*
                   2220:  * MI interface for subr_interrupt.c
                   2221:  */
                   2222: void
                   2223: interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
                   2224: {
                   2225:        struct cpu_info *ci;
                   2226:        struct intrhand *ih;
                   2227:
                   2228:        kcpuset_zero(cpuset);
                   2229:
                   2230:        mutex_enter(&cpu_lock);
                   2231:
                   2232:        ih = intr_get_handler(intrid);
1.79      knakahar 2233:        if (ih == NULL)
1.87      knakahar 2234:                goto out;
                   2235:
                   2236:        ci = ih->ih_cpu;
                   2237:        kcpuset_set(cpuset, cpu_index(ci));
                   2238:
                   2239:  out:
                   2240:        mutex_exit(&cpu_lock);
                   2241: }
                   2242:
1.126     jdolecek 2243: #endif /* XEN */
1.106     cherry   2244:
1.87      knakahar 2245: /*
                   2246:  * MI interface for subr_interrupt.c
                   2247:  */
                   2248: void
                   2249: interrupt_get_available(kcpuset_t *cpuset)
                   2250: {
                   2251:        CPU_INFO_ITERATOR cii;
                   2252:        struct cpu_info *ci;
                   2253:
                   2254:        kcpuset_zero(cpuset);
                   2255:
                   2256:        mutex_enter(&cpu_lock);
                   2257:        for (CPU_INFO_FOREACH(cii, ci)) {
                   2258:                if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
                   2259:                        kcpuset_set(cpuset, cpu_index(ci));
                   2260:                }
                   2261:        }
                   2262:        mutex_exit(&cpu_lock);
                   2263: }
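
/*
 * Illustrative sketch (the example_* name is hypothetical): checking whether
 * the interrupt named "intrid" is currently assigned to a CPU that still
 * accepts interrupts, using the two MI interfaces above.
 */
#if 0
static bool
example_intr_on_available_cpu(const char *intrid)
{
        kcpuset_t *assigned, *avail;
        u_int idx;
        bool ret;

        kcpuset_create(&assigned, true);
        kcpuset_create(&avail, true);

        interrupt_get_assigned(intrid, assigned);
        interrupt_get_available(avail);

        /* kcpuset_ffs() is 1-based and returns 0 for an empty set. */
        idx = kcpuset_ffs(assigned);
        ret = (idx != 0) && kcpuset_isset(avail, idx - 1);

        kcpuset_destroy(assigned);
        kcpuset_destroy(avail);
        return ret;
}
#endif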
                   2264:
1.126     jdolecek 2265: #if !defined(XEN)
                   2266:
1.87      knakahar 2267: /*
                   2268:  * MI interface for subr_interrupt.c
                   2269:  */
                   2270: void
                   2271: interrupt_get_devname(const char *intrid, char *buf, size_t len)
                   2272: {
                   2273:        struct intrsource *isp;
                   2274:        struct intrhand *ih;
                   2275:        int slot;
1.79      knakahar 2276:
                   2277:        mutex_enter(&cpu_lock);
                   2278:
1.87      knakahar 2279:        ih = intr_get_handler(intrid);
                   2280:        if (ih == NULL) {
                   2281:                buf[0] = '\0';
                   2282:                goto out;
                   2283:        }
                   2284:        slot = ih->ih_slot;
                   2285:        isp = ih->ih_cpu->ci_isources[slot];
1.94      knakahar 2286:        strlcpy(buf, isp->is_xname, len);
1.87      knakahar 2287:
                   2288:  out:
                   2289:        mutex_exit(&cpu_lock);
                   2290: }
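
/*
 * Illustrative sketch (the buffer size and example_* name are hypothetical):
 * fetching the driver name(s) attached to an interrupt ID.
 */
#if 0
static void
example_print_devname(const char *intrid)
{
        char buf[64];

        interrupt_get_devname(intrid, buf, sizeof(buf));
        printf("%s: %s\n", intrid, buf[0] != '\0' ? buf : "<none>");
}
#endif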
                   2291:
                   2292: static int
                   2293: intr_distribute_locked(struct intrhand *ih, const kcpuset_t *newset,
                   2294:     kcpuset_t *oldset)
                   2295: {
                   2296:        struct intrsource *isp;
                   2297:        int slot;
                   2298:
1.95      knakahar 2299:        KASSERT(mutex_owned(&intr_distribute_lock));
1.87      knakahar 2300:        KASSERT(mutex_owned(&cpu_lock));
                   2301:
                   2302:        if (ih == NULL)
                   2303:                return EINVAL;
                   2304:
1.79      knakahar 2305:        slot = ih->ih_slot;
                   2306:        isp = ih->ih_cpu->ci_isources[slot];
                   2307:        KASSERT(isp != NULL);
                   2308:
                   2309:        if (oldset != NULL)
                   2310:                intr_get_affinity(isp, oldset);
                   2311:
1.87      knakahar 2312:        return intr_set_affinity(isp, newset);
                   2313: }
                   2314:
                   2315: /*
                   2316:  * MI interface for subr_interrupt.c
                   2317:  */
                   2318: int
                   2319: interrupt_distribute(void *cookie, const kcpuset_t *newset, kcpuset_t *oldset)
                   2320: {
                   2321:        int error;
                   2322:        struct intrhand *ih = cookie;
1.79      knakahar 2323:
1.95      knakahar 2324:        mutex_enter(&intr_distribute_lock);
1.87      knakahar 2325:        mutex_enter(&cpu_lock);
                   2326:        error = intr_distribute_locked(ih, newset, oldset);
1.79      knakahar 2327:        mutex_exit(&cpu_lock);
1.95      knakahar 2328:        mutex_exit(&intr_distribute_lock);
1.79      knakahar 2329:
1.87      knakahar 2330:        return error;
                   2331: }
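
/*
 * Illustrative sketch (the example_* name is hypothetical): pinning an
 * established handler, using the cookie returned by intr_establish(), to a
 * single CPU via interrupt_distribute().
 */
#if 0
static int
example_pin_intr(void *ih_cookie, u_int cpu_idx)
{
        kcpuset_t *newset;
        int error;

        kcpuset_create(&newset, true);
        kcpuset_set(newset, cpu_idx);
        error = interrupt_distribute(ih_cookie, newset, NULL);
        kcpuset_destroy(newset);
        return error;
}
#endif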
                   2332:
                   2333: /*
                   2334:  * MI interface for subr_interrupt.c
                   2335:  */
                   2336: int
                   2337: interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
                   2338:     kcpuset_t *oldset)
                   2339: {
                   2340:        int error;
                   2341:        struct intrhand *ih;
                   2342:
1.95      knakahar 2343:        mutex_enter(&intr_distribute_lock);
1.87      knakahar 2344:        mutex_enter(&cpu_lock);
                   2345:
                   2346:        ih = intr_get_handler(intrid);
                   2347:        if (ih == NULL) {
                   2348:                error = ENOENT;
                   2349:                goto out;
                   2350:        }
                   2351:        error = intr_distribute_locked(ih, newset, oldset);
                   2352:
                   2353:  out:
                   2354:        mutex_exit(&cpu_lock);
1.95      knakahar 2355:        mutex_exit(&intr_distribute_lock);
1.87      knakahar 2356:        return error;
                   2357: }
                   2358:
                   2359: /*
                   2360:  * MI interface for subr_interrupt.c
                   2361:  */
                   2362: struct intrids_handler *
                   2363: interrupt_construct_intrids(const kcpuset_t *cpuset)
                   2364: {
                   2365:        struct intrsource *isp;
                   2366:        struct intrids_handler *ii_handler;
                   2367:        intrid_t *ids;
                   2368:        int i, count;
                   2369:
                   2370:        if (kcpuset_iszero(cpuset))
                    2371:                return NULL;
                   2372:
                   2373:        /*
                    2374:         * Count the interrupt sources bound to any CPU in "cpuset".
                   2375:         */
                   2376:        count = 0;
                   2377:        mutex_enter(&cpu_lock);
                   2378:        SIMPLEQ_FOREACH(isp, &io_interrupt_sources, is_list) {
                   2379:                if (intr_is_affinity_intrsource(isp, cpuset))
                   2380:                        count++;
                   2381:        }
                   2382:        mutex_exit(&cpu_lock);
                   2383:
                   2384:        ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
                   2385:            KM_SLEEP);
                   2386:        if (ii_handler == NULL)
                   2387:                return NULL;
                   2388:        ii_handler->iih_nids = count;
                   2389:        if (count == 0)
                   2390:                return ii_handler;
                   2391:
                   2392:        ids = ii_handler->iih_intrids;
                   2393:        i = 0;
                   2394:        mutex_enter(&cpu_lock);
                   2395:        SIMPLEQ_FOREACH(isp, &io_interrupt_sources, is_list) {
                    2396:                /* Ignore sources attached after "count" was taken above. */
                   2397:                if (i >= count) {
                   2398:                        DPRINTF(("New devices are attached after counting.\n"));
                   2399:                        break;
                   2400:                }
                   2401:
                   2402:                if (!intr_is_affinity_intrsource(isp, cpuset))
                   2403:                        continue;
                   2404:
                    2405:                strlcpy(ids[i], isp->is_intrid, sizeof(intrid_t));
                   2406:                i++;
                   2407:        }
                   2408:        mutex_exit(&cpu_lock);
                   2409:
                   2410:        return ii_handler;
                   2411: }
1.126     jdolecek 2412: #endif /* !XEN */
1.87      knakahar 2413:
                   2414: /*
                   2415:  * MI interface for subr_interrupt.c
                   2416:  */
                   2417: void
                   2418: interrupt_destruct_intrids(struct intrids_handler *ii_handler)
                   2419: {
                   2420:        size_t iih_size;
                   2421:
                   2422:        if (ii_handler == NULL)
                   2423:                return;
                   2424:
                   2425:        iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
                   2426:        kmem_free(ii_handler, iih_size);
1.79      knakahar 2427: }
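
/*
 * Illustrative sketch (the example_* name is hypothetical): listing the
 * interrupt IDs bound to the CPUs in "cpuset" and releasing the result.
 */
#if 0
static void
example_print_intrids(const kcpuset_t *cpuset)
{
        struct intrids_handler *iih;
        intrid_t *ids;
        int i;

        iih = interrupt_construct_intrids(cpuset);
        if (iih == NULL)
                return;

        ids = iih->iih_intrids;
        for (i = 0; i < iih->iih_nids; i++)
                printf("%s\n", ids[i]);

        interrupt_destruct_intrids(iih);
}
#endif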
