
Annotation of src/sys/arch/xen/x86/xen_ipi.c, Revision 1.9

1.9     ! cherry      1: /* $NetBSD: xen_ipi.c,v 1.8 2011/12/28 18:59:21 cherry Exp $ */
1.2       cherry      2:
                      3: /*-
                      4:  * Copyright (c) 2011 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Cherry G. Mathew <cherry@zyx.in>
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: #include <sys/cdefs.h>                 /* RCS ID macro */
                     33:
                     34: /*
                     35:  * Based on: x86/ipi.c
1.9     ! cherry     36:  * __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.8 2011/12/28 18:59:21 cherry Exp $");
1.2       cherry     37:  */
                     38:
1.9     ! cherry     39: __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.8 2011/12/28 18:59:21 cherry Exp $");
1.2       cherry     40:
                     41: #include <sys/types.h>
                     42:
                     43: #include <sys/atomic.h>
                     44: #include <sys/mutex.h>
                     45: #include <sys/cpu.h>
                     46: #include <sys/device.h>
                     47: #include <sys/xcall.h>
                     48: #include <sys/errno.h>
                     49: #include <sys/systm.h>
                     50:
                     51: #include <machine/cpu.h>
                     52: #ifdef __x86_64__
                     53: #include <machine/fpu.h>
                     54: #else
                     55: #include <machine/npx.h>
                     56: #endif /* __x86_64__ */
                     57: #include <machine/frame.h>
                     58: #include <machine/segments.h>
                     59:
                     60: #include <xen/intr.h>
                     61: #include <xen/intrdefs.h>
                     62: #include <xen/hypervisor.h>
1.7       cegger     63: #include <xen/xen-public/vcpu.h>
1.2       cherry     64:
                     65: #ifdef __x86_64__
                     66: extern void ddb_ipi(struct trapframe);
                     67: #else
                     68: extern void ddb_ipi(int, struct trapframe);
                     69: #endif /* __x86_64__ */
                     70:
                     71: static void xen_ipi_halt(struct cpu_info *, struct intrframe *);
                     72: static void xen_ipi_synch_fpu(struct cpu_info *, struct intrframe *);
                     73: static void xen_ipi_ddb(struct cpu_info *, struct intrframe *);
                     74: static void xen_ipi_xcall(struct cpu_info *, struct intrframe *);
1.6       cherry     75: static void xen_ipi_hvcb(struct cpu_info *, struct intrframe *);
1.2       cherry     76:
                     77: static void (*ipifunc[XEN_NIPIS])(struct cpu_info *, struct intrframe *) =
                      78: {      /* In order of priority (see: xen/include/intrdefs.h) */
                     79:        xen_ipi_halt,
                     80:        xen_ipi_synch_fpu,
                     81:        xen_ipi_ddb,
1.6       cherry     82:        xen_ipi_xcall,
                     83:        xen_ipi_hvcb
1.2       cherry     84: };
                     85:
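                          /*
                           * Handler for the per-cpu IPI event channel: atomically fetch
                           * and clear the pending IPI bitmask, then run the handler for
                           * each set bit in ascending order (the priority order of the
                           * ipifunc[] table above).
                           */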
                     86: static void
                     87: xen_ipi_handler(struct cpu_info *ci, struct intrframe *regs)
                     88: {
                     89:        uint32_t pending;
                     90:        int bit;
                     91:
                     92:        pending = atomic_swap_32(&ci->ci_ipis, 0);
                     93:
                     94:        KDASSERT((pending >> XEN_NIPIS) == 0);
                     95:        while ((bit = ffs(pending)) != 0) {
                     96:                bit--;
                     97:                pending &= ~(1 << bit);
                     98:                ci->ci_ipi_events[bit].ev_count++;
                     99:                if (ipifunc[bit] != NULL) {
                    100:                        (*ipifunc[bit])(ci, regs);
1.3       cherry    101:                } else {
1.2       cherry    102:                        panic("ipifunc[%d] unsupported!\n", bit);
                    103:                        /* NOTREACHED */
                    104:                }
                    105:        }
                    106: }
                    107:
                    108: /* Must be called once for every cpu that expects to send/recv ipis */
                    109: void
                    110: xen_ipi_init(void)
                    111: {
                    112:        cpuid_t vcpu;
                    113:        evtchn_port_t evtchn;
                    114:        struct cpu_info *ci;
                    115:
                    116:        ci = curcpu();
                    117:
                    118:        vcpu = ci->ci_cpuid;
1.7       cegger    119:        KASSERT(vcpu < XEN_LEGACY_MAX_VCPUS);
1.2       cherry    120:
1.3       cherry    121:        evtchn = bind_vcpu_to_evtch(vcpu);
                    122:        ci->ci_ipi_evtchn = evtchn;
1.2       cherry    123:
                    124:        KASSERT(evtchn != -1 && evtchn < NR_EVENT_CHANNELS);
                    125:
                    126:        if (0 != event_set_handler(evtchn, (int (*)(void *))xen_ipi_handler,
                    127:                                   ci, IPL_HIGH, "ipi")) {
                    128:                panic("event_set_handler(...) KPI violation\n");
                    129:                /* NOTREACHED */
                    130:        }
                    131:
                    132:        hypervisor_enable_event(evtchn);
                    133: }
                    134:
                     135: /* prefer this to a global variable */
                    136: static inline u_int max_cpus(void)
                    137: {
                    138:        return maxcpus;
                    139: }
                    140:
                    141: static inline bool /* helper */
                    142: valid_ipimask(uint32_t ipimask)
                    143: {
1.6       cherry    144:        uint32_t masks =  XEN_IPI_HVCB | XEN_IPI_XCALL |
                    145:                 XEN_IPI_DDB | XEN_IPI_SYNCH_FPU |
                    146:                 XEN_IPI_HALT | XEN_IPI_KICK;
1.2       cherry    147:
                    148:        if (ipimask & ~masks) {
                    149:                return false;
1.3       cherry    150:        } else {
1.2       cherry    151:                return true;
                    152:        }
                    153:
                    154: }
                    155:
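                          /*
                           * Post the bits in ipimask to ci's pending-IPI word and notify
                           * its IPI event channel.  Returns ENOENT if ci is not running.
                           */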
                    156: int
                    157: xen_send_ipi(struct cpu_info *ci, uint32_t ipimask)
                    158: {
                    159:        evtchn_port_t evtchn;
                    160:
                     161:        KASSERT(ci != NULL && ci != curcpu());
                    162:
1.4       cherry    163:        if ((ci->ci_flags & CPUF_RUNNING) == 0) {
1.2       cherry    164:                return ENOENT;
                    165:        }
                    166:
                    167:        evtchn = ci->ci_ipi_evtchn;
1.3       cherry    168:
                    169:        KASSERTMSG(valid_ipimask(ipimask) == true,
1.5       jym       170:                "xen_send_ipi() called with invalid ipimask\n");
1.2       cherry    171:
                    172:        atomic_or_32(&ci->ci_ipis, ipimask);
                    173:        hypervisor_notify_via_evtchn(evtchn);
                    174:
                    175:        return 0;
                    176: }
                    177:
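                          /* Send ipimask to every running cpu except the calling one. */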
                    178: void
                    179: xen_broadcast_ipi(uint32_t ipimask)
                    180: {
                    181:        struct cpu_info *ci, *self = curcpu();
                    182:        CPU_INFO_ITERATOR cii;
                    183:
1.3       cherry    184:        KASSERTMSG(valid_ipimask(ipimask) == true,
1.5       jym       185:                "xen_broadcast_ipi() called with invalid ipimask\n");
1.2       cherry    186:
                    187:        /*
                    188:         * XXX-cherry: there's an implicit broadcast sending order
                    189:         * which I dislike. Randomise this ? :-)
                    190:         */
                    191:
                    192:        for (CPU_INFO_FOREACH(cii, ci)) {
                    193:                if (ci == NULL)
                    194:                        continue;
                    195:                if (ci == self)
                    196:                        continue;
                    197:                if (ci->ci_data.cpu_idlelwp == NULL)
                    198:                        continue;
                    199:                if ((ci->ci_flags & CPUF_PRESENT) == 0)
                    200:                        continue;
                    201:                if (ci->ci_flags & (CPUF_RUNNING)) {
                    202:                        if (0 != xen_send_ipi(ci, ipimask)) {
                    203:                                panic("xen_ipi of %x from %s to %s failed\n",
                    204:                                      ipimask, cpu_name(curcpu()),
                    205:                                      cpu_name(ci));
                    206:                        }
                    207:                }
                    208:        }
                    209: }
                    210:
                    211: /* MD wrapper for the xcall(9) callback. */
                    212: #define PRIuCPUID      "lu" /* XXX: move this somewhere more appropriate */
                    213:
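                          /* IPI handler: take the receiving vcpu offline via VCPUOP_down. */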
                    214: static void
                    215: xen_ipi_halt(struct cpu_info *ci, struct intrframe *intrf)
                    216: {
                    217:        KASSERT(ci == curcpu());
                    218:        KASSERT(ci != NULL);
                    219:        if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_cpuid, NULL)) {
                     220:                panic("vcpu%" PRIuCPUID " shutdown failed.\n", ci->ci_cpuid);
                    221:        }
                    222:
                    223: }
                    224:
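                          /* IPI handler: save this cpu's FPU (npx on i386) state. */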
                    225: static void
                    226: xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf)
                    227: {
                    228:        KASSERT(ci != NULL);
                    229:        KASSERT(intrf != NULL);
                    230:
                    231: #ifdef __x86_64__
                    232:        fpusave_cpu(true);
                    233: #else
                    234:        npxsave_cpu(true);
                    235: #endif /* __x86_64__ */
                    236: }
                    237:
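                          /*
                           * IPI handler: enter the kernel debugger on this cpu.  On i386
                           * the interrupt frame is copied into a trapframe first.
                           */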
                    238: static void
                    239: xen_ipi_ddb(struct cpu_info *ci, struct intrframe *intrf)
                    240: {
                    241:        KASSERT(ci != NULL);
                    242:        KASSERT(intrf != NULL);
                    243:
                    244: #ifdef __x86_64__
                    245:        ddb_ipi(intrf->if_tf);
                    246: #else
                    247:        struct trapframe tf;
                    248:        tf.tf_gs = intrf->if_gs;
                    249:        tf.tf_fs = intrf->if_fs;
                    250:        tf.tf_es = intrf->if_es;
                    251:        tf.tf_ds = intrf->if_ds;
                    252:        tf.tf_edi = intrf->if_edi;
                    253:        tf.tf_esi = intrf->if_esi;
                    254:        tf.tf_ebp = intrf->if_ebp;
                    255:        tf.tf_ebx = intrf->if_ebx;
                    256:        tf.tf_ecx = intrf->if_ecx;
                    257:        tf.tf_eax = intrf->if_eax;
                    258:        tf.tf_trapno = intrf->__if_trapno;
                    259:        tf.tf_err = intrf->__if_err;
                    260:        tf.tf_eip = intrf->if_eip;
                    261:        tf.tf_cs = intrf->if_cs;
                    262:        tf.tf_eflags = intrf->if_eflags;
                    263:        tf.tf_esp = intrf->if_esp;
                    264:        tf.tf_ss = intrf->if_ss;
                    265:
                    266:        /* XXX: does i386/Xen have vm86 support ?
                    267:        tf.tf_vm86_es;
                    268:        tf.tf_vm86_ds;
                    269:        tf.tf_vm86_fs;
                    270:        tf.tf_vm86_gs;
                    271:           :XXX */
                    272:
                    273:        ddb_ipi(SEL_KPL, tf);
                    274: #endif
                    275: }
                    276:
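                          /* IPI handler: run the MI xcall(9) cross-call handler. */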
                    277: static void
                    278: xen_ipi_xcall(struct cpu_info *ci, struct intrframe *intrf)
                    279: {
                    280:        KASSERT(ci != NULL);
                    281:        KASSERT(intrf != NULL);
                    282:
                    283:        xc_ipi_handler();
                    284: }
                    285:
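                          /*
                           * MI xcall(9) hook: IPI a single cpu, or every other running
                           * cpu when ci is NULL.
                           */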
                    286: void
                    287: xc_send_ipi(struct cpu_info *ci)
                    288: {
                    289:
                    290:        KASSERT(kpreempt_disabled());
                    291:        KASSERT(curcpu() != ci);
                    292:        if (ci) {
                    293:                if (0 != xen_send_ipi(ci, XEN_IPI_XCALL)) {
                    294:                        panic("xen_send_ipi(XEN_IPI_XCALL) failed\n");
1.3       cherry    295:                }
1.2       cherry    296:        } else {
                    297:                xen_broadcast_ipi(XEN_IPI_XCALL);
                    298:        }
                    299: }
1.6       cherry    300:
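                          /* IPI handler: force a hypervisor callback on this vcpu. */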
                    301: static void
                    302: xen_ipi_hvcb(struct cpu_info *ci, struct intrframe *intrf)
                    303: {
                    304:        KASSERT(ci != NULL);
                    305:        KASSERT(intrf != NULL);
1.8       cherry    306:        KASSERT(ci == curcpu());
                    307:        KASSERT(!ci->ci_vcpu->evtchn_upcall_mask);
1.6       cherry    308:
                    309:        hypervisor_force_callback();
                    310: }
