Annotation of src/sys/arch/xen/xen/xennetback_xenbus.c, Revision 1.26
1.26 ! jym 1: /* $NetBSD: xennetback_xenbus.c,v 1.25 2008/11/13 18:44:51 cegger Exp $ */
1.1 bouyer 2:
3: /*
4: * Copyright (c) 2006 Manuel Bouyer.
5: *
6: * Redistribution and use in source and binary forms, with or without
7: * modification, are permitted provided that the following conditions
8: * are met:
9: * 1. Redistributions of source code must retain the above copyright
10: * notice, this list of conditions and the following disclaimer.
11: * 2. Redistributions in binary form must reproduce the above copyright
12: * notice, this list of conditions and the following disclaimer in the
13: * documentation and/or other materials provided with the distribution.
14: * 3. All advertising materials mentioning features or use of this software
15: * must display the following acknowledgement:
16: * This product includes software developed by Manuel Bouyer.
17: * 4. The name of the author may not be used to endorse or promote products
18: * derived from this software without specific prior written permission.
19: *
20: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30: *
31: */
32:
33: #include "opt_xen.h"
34:
35: #include <sys/types.h>
36: #include <sys/param.h>
37: #include <sys/systm.h>
38: #include <sys/malloc.h>
39: #include <sys/queue.h>
40: #include <sys/kernel.h>
41: #include <sys/mbuf.h>
42: #include <sys/protosw.h>
43: #include <sys/socket.h>
44: #include <sys/ioctl.h>
45: #include <sys/errno.h>
46: #include <sys/device.h>
1.21 ad 47: #include <sys/intr.h>
1.1 bouyer 48:
49: #include <net/if.h>
50: #include <net/if_types.h>
51: #include <net/if_dl.h>
52: #include <net/route.h>
53: #include <net/netisr.h>
54: #include "bpfilter.h"
55: #if NBPFILTER > 0
56: #include <net/bpf.h>
57: #include <net/bpfdesc.h>
58: #endif
59:
60: #include <net/if_ether.h>
61:
62:
1.20 bouyer 63: #include <xen/xen.h>
64: #include <xen/xen_shm.h>
65: #include <xen/evtchn.h>
66: #include <xen/xenbus.h>
67: #include <xen/xennet_checksum.h>
1.1 bouyer 68:
69: #include <uvm/uvm.h>
70:
71: #ifdef XENDEBUG_NET
72: #define XENPRINTF(x) printf x
73: #else
74: #define XENPRINTF(x)
75: #endif
76:
1.26 ! jym 77: #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
1.1 bouyer 78: #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
79:
1.8 bouyer 80: /* Linux wants at least 16 bytes free in front of the packet */
81: #define LINUX_REQUESTED_OFFSET 16
82:
1.1 bouyer 83: /* descriptor of a packet being handled by the kernel */
85: struct xni_pkt {
86: int pkt_id; /* packet's ID */
87: grant_handle_t pkt_handle;
88: struct xnetback_instance *pkt_xneti; /* pointer back to our softc */
89: };
90:
91: static inline void xni_pkt_unmap(struct xni_pkt *, vaddr_t);
92:
93:
94: /* pools for xni_pkt */
95: struct pool xni_pkt_pool;
96: /* ratecheck(9) for pool allocation failures */
97: struct timeval xni_pool_errintvl = { 30, 0 }; /* 30s, each */
98: /*
99: * Backend network device driver for Xen
100: */
101:
102: /* state of a xnetback instance */
103: typedef enum {CONNECTED, DISCONNECTING, DISCONNECTED} xnetback_state_t;
104:
105: /* we keep the xnetback instances in a linked list */
106: struct xnetback_instance {
1.26 ! jym 107: SLIST_ENTRY(xnetback_instance) next;
1.1 bouyer 108: struct xenbus_device *xni_xbusd; /* our xenstore entry */
109: domid_t xni_domid; /* attached to this domain */
110: uint32_t xni_handle; /* domain-specific handle */
111: xnetback_state_t xni_status;
112: void *xni_softintr;
113:
114: /* network interface stuff */
115: struct ethercom xni_ec;
116: struct callout xni_restart;
1.22 cegger 117: uint8_t xni_enaddr[ETHER_ADDR_LEN];
1.1 bouyer 118:
119: /* remote domain communication stuff */
 120: 	unsigned int xni_evtchn; /* our event channel */
121: netif_tx_back_ring_t xni_txring;
122: netif_rx_back_ring_t xni_rxring;
123: grant_handle_t xni_tx_ring_handle; /* to unmap the ring */
124: grant_handle_t xni_rx_ring_handle;
125: vaddr_t xni_tx_ring_va; /* to unmap the ring */
126: vaddr_t xni_rx_ring_va;
127: };
128: #define xni_if xni_ec.ec_if
129: #define xni_bpf xni_if.if_bpf
130:
131: void xvifattach(int);
1.13 christos 132: static int xennetback_ifioctl(struct ifnet *, u_long, void *);
1.1 bouyer 133: static void xennetback_ifstart(struct ifnet *);
134: static void xennetback_ifsoftstart(void *);
135: static void xennetback_ifwatchdog(struct ifnet *);
136: static int xennetback_ifinit(struct ifnet *);
137: static void xennetback_ifstop(struct ifnet *, int);
138:
139: static int xennetback_xenbus_create(struct xenbus_device *);
140: static int xennetback_xenbus_destroy(void *);
141: static void xennetback_frontend_changed(void *, XenbusState);
142:
143: static inline void xennetback_tx_response(struct xnetback_instance *,
144: int, int);
1.13 christos 145: static void xennetback_tx_free(struct mbuf * , void *, size_t, void *);
1.1 bouyer 146:
147: SLIST_HEAD(, xnetback_instance) xnetback_instances;
148:
149: static struct xnetback_instance *xnetif_lookup(domid_t, uint32_t);
150: static int xennetback_evthandler(void *);
151:
152: static struct xenbus_backend_driver xvif_backend_driver = {
153: .xbakd_create = xennetback_xenbus_create,
154: .xbakd_type = "vif"
155: };
156:
157: /*
158: * Number of packets to transmit in one hypercall (= number of pages to
159: * transmit at once).
160: */
161: #define NB_XMIT_PAGES_BATCH 64
162: /*
1.26 ! jym 163: * We will transfer a mapped page to the remote domain, and remap another
! 164: * page in place immediately. For this we keep a list of pages available.
1.1 bouyer 165: * When the list is empty, we ask the hypervisor to give us
166: * NB_XMIT_PAGES_BATCH pages back.
167: */
168: static unsigned long mcl_pages[NB_XMIT_PAGES_BATCH]; /* our physical pages */
169: int mcl_pages_alloc; /* current index in mcl_pages */
170: static int xennetback_get_mcl_page(paddr_t *);
171: static void xennetback_get_new_mcl_pages(void);
172: /*
173: * If we can't transfer the mbuf directly, we have to copy it to a page which
1.26 ! jym 174: * will be transferred to the remote domain. We use a pool_cache
1.1 bouyer 175: * for this, or the mbuf cluster pool cache if MCLBYTES == PAGE_SIZE
176: */
177: #if MCLBYTES != PAGE_SIZE
1.18 ad 178: pool_cache_t xmit_pages_cache;
1.1 bouyer 179: #endif
1.18 ad 180: pool_cache_t xmit_pages_cachep;
1.1 bouyer 181:
182: /* arrays used in xennetback_ifstart(), too large to allocate on stack */
1.8 bouyer 183: static mmu_update_t xstart_mmu[NB_XMIT_PAGES_BATCH];
184: static multicall_entry_t xstart_mcl[NB_XMIT_PAGES_BATCH + 1];
1.1 bouyer 185: static gnttab_transfer_t xstart_gop[NB_XMIT_PAGES_BATCH];
186: struct mbuf *mbufs_sent[NB_XMIT_PAGES_BATCH];
187: struct _pages_pool_free {
188: vaddr_t va;
189: paddr_t pa;
190: } pages_pool_free[NB_XMIT_PAGES_BATCH];
191:
192:
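      /*
       * Unmap the foreign page backing a packet and return the packet
       * descriptor to the xni_pkt pool.
       */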
193: static inline void
194: xni_pkt_unmap(struct xni_pkt *pkt, vaddr_t pkt_va)
195: {
1.4 bouyer 196: xen_shm_unmap(pkt_va, 1, &pkt->pkt_handle);
1.1 bouyer 197: pool_put(&xni_pkt_pool, pkt);
198: }
199:
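      /*
       * xvifattach() runs once at attach time: it pre-allocates the pool
       * of replacement machine pages handed back when pages are flipped
       * to a guest, initialises the packet pools and registers the "vif"
       * backend with xenbus.
       */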
200: void
201: xvifattach(int n)
202: {
203: int i;
204: struct pglist mlist;
205: struct vm_page *pg;
206:
207: XENPRINTF(("xennetback_init\n"));
208:
209: /*
1.26 ! jym 210: 	 * steal some non-managed pages from the VM system, to replace
! 211: * mbuf cluster or xmit_pages_pool pages given to foreign domains.
1.1 bouyer 212: */
213: if (uvm_pglistalloc(PAGE_SIZE * NB_XMIT_PAGES_BATCH, 0, 0xffffffff,
214: 0, 0, &mlist, NB_XMIT_PAGES_BATCH, 0) != 0)
215: panic("xennetback_init: uvm_pglistalloc");
216: for (i = 0, pg = mlist.tqh_first; pg != NULL;
1.23 ad 217: pg = pg->pageq.queue.tqe_next, i++)
1.1 bouyer 218: mcl_pages[i] = xpmap_ptom(VM_PAGE_TO_PHYS(pg)) >> PAGE_SHIFT;
219: if (i != NB_XMIT_PAGES_BATCH)
220: panic("xennetback_init: %d mcl pages", i);
221: mcl_pages_alloc = NB_XMIT_PAGES_BATCH - 1;
222:
223: /* initialise pools */
224: pool_init(&xni_pkt_pool, sizeof(struct xni_pkt), 0, 0, 0,
1.15 ad 225: "xnbpkt", NULL, IPL_VM);
1.1 bouyer 226: #if MCLBYTES != PAGE_SIZE
1.18 ad 227: xmit_pages_cache = pool_cache_init(PAGE_SIZE, 0, 0, 0, "xnbxm", NULL,
228: IPL_VM, NULL, NULL, NULL);
1.19 ad 229: xmit_pages_cachep = xmit_pages_cache;
1.1 bouyer 230: #else
1.19 ad 231: xmit_pages_cachep = mcl_cache;
1.1 bouyer 232: #endif
233:
234: SLIST_INIT(&xnetback_instances);
235: xenbus_backend_register(&xvif_backend_driver);
236: }
237:
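      /*
       * Backend creation: read "frontend-id" and "handle" from our
       * xenstore entry, create the xvif<domid>.<handle> pseudo-interface
       * with a MAC address derived from the guest's, and switch to
       * InitWait so the frontend can publish its ring references.
       */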
238: static int
239: xennetback_xenbus_create(struct xenbus_device *xbusd)
240: {
241: struct xnetback_instance *xneti;
242: long domid, handle;
243: struct ifnet *ifp;
244: extern int ifqmaxlen; /* XXX */
245: char *val, *e, *p;
246: int i, err;
247:
248: if ((err = xenbus_read_ul(NULL, xbusd->xbusd_path,
1.5 bouyer 249: "frontend-id", &domid, 10)) != 0) {
1.1 bouyer 250: 	aprint_error("xvif: can't read %s/frontend-id: %d\n",
251: xbusd->xbusd_path, err);
252: return err;
253: }
254: if ((err = xenbus_read_ul(NULL, xbusd->xbusd_path,
1.5 bouyer 255: "handle", &handle, 10)) != 0) {
1.1 bouyer 256: 	aprint_error("xvif: can't read %s/handle: %d\n",
257: xbusd->xbusd_path, err);
258: return err;
259: }
260:
261: if (xnetif_lookup(domid, handle) != NULL) {
262: return EEXIST;
263: }
264: xneti = malloc(sizeof(struct xnetback_instance), M_DEVBUF,
265: M_NOWAIT | M_ZERO);
266: if (xneti == NULL) {
267: return ENOMEM;
268: }
269: xneti->xni_domid = domid;
270: xneti->xni_handle = handle;
271: xneti->xni_status = DISCONNECTED;
272:
273: xbusd->xbusd_u.b.b_cookie = xneti;
274: xbusd->xbusd_u.b.b_detach = xennetback_xenbus_destroy;
275: xneti->xni_xbusd = xbusd;
276:
1.21 ad 277: xneti->xni_softintr = softint_establish(SOFTINT_NET,
1.1 bouyer 278: xennetback_ifsoftstart, xneti);
279: if (xneti->xni_softintr == NULL) {
280: err = ENOMEM;
281: goto fail;
282: }
283:
284: ifp = &xneti->xni_if;
285: ifp->if_softc = xneti;
286:
287: /* read mac address */
288: if ((err = xenbus_read(NULL, xbusd->xbusd_path, "mac", NULL, &val))) {
 289: 		aprint_error("xvif: can't read %s/mac: %d\n",
290: xbusd->xbusd_path, err);
291: goto fail;
292: }
293: for (i = 0, p = val; i < 6; i++) {
294: xneti->xni_enaddr[i] = strtoul(p, &e, 16);
 295: 		if (i < 5 && e[0] != ':') {
 296: 			aprint_error("xvif: %s is not a valid mac address\n",
 297: 			    val);
      			free(val, M_DEVBUF);
 298: 			err = EINVAL;
 299: 			goto fail;
 300: 		}
301: p = &e[1];
302: }
303: free(val, M_DEVBUF);
304:
305: /* we can't use the same MAC addr as our guest */
306: xneti->xni_enaddr[3]++;
307: /* create pseudo-interface */
308: snprintf(xneti->xni_if.if_xname, IFNAMSIZ, "xvif%d.%d",
309: (int)domid, (int)handle);
1.24 jym 310: aprint_verbose_ifnet(ifp, "Ethernet address %s\n",
1.1 bouyer 311: ether_sprintf(xneti->xni_enaddr));
312: ifp->if_flags =
313: IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
314: ifp->if_snd.ifq_maxlen =
1.12 bouyer 315: max(ifqmaxlen, NET_TX_RING_SIZE * 2);
1.11 yamt 316: ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
1.1 bouyer 317: ifp->if_ioctl = xennetback_ifioctl;
318: ifp->if_start = xennetback_ifstart;
319: ifp->if_watchdog = xennetback_ifwatchdog;
320: ifp->if_init = xennetback_ifinit;
321: ifp->if_stop = xennetback_ifstop;
322: ifp->if_timer = 0;
323: IFQ_SET_READY(&ifp->if_snd);
324: if_attach(ifp);
325: ether_ifattach(&xneti->xni_if, xneti->xni_enaddr);
326:
327: SLIST_INSERT_HEAD(&xnetback_instances, xneti, next);
328:
329: xbusd->xbusd_otherend_changed = xennetback_frontend_changed;
330:
331: err = xenbus_switch_state(xbusd, NULL, XenbusStateInitWait);
332: if (err) {
333: printf("failed to switch state on %s: %d\n",
334: xbusd->xbusd_path, err);
335: goto fail;
336: }
342: return 0;
343: fail:
344: free(xneti, M_DEVBUF);
345: return err;
346: }
347:
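      /*
       * Backend teardown: remove the event handler and the soft
       * interrupt, detach the pseudo-interface, then unmap the shared
       * rings and free their VA space.
       */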
348: int
349: xennetback_xenbus_destroy(void *arg)
350: {
351: struct xnetback_instance *xneti = arg;
352: struct gnttab_unmap_grant_ref op;
353: int err;
354:
355: #if 0
356: if (xneti->xni_status == CONNECTED) {
357: return EBUSY;
358: }
359: #endif
1.24 jym 360: aprint_verbose_ifnet(&xneti->xni_if, "disconnecting\n");
1.1 bouyer 361: hypervisor_mask_event(xneti->xni_evtchn);
362: event_remove_handler(xneti->xni_evtchn, xennetback_evthandler, xneti);
1.21 ad 363: softint_disestablish(xneti->xni_softintr);
1.1 bouyer 364:
365: SLIST_REMOVE(&xnetback_instances,
366: xneti, xnetback_instance, next);
367:
368: ether_ifdetach(&xneti->xni_if);
369: if_detach(&xneti->xni_if);
370:
371: if (xneti->xni_txring.sring) {
372: op.host_addr = xneti->xni_tx_ring_va;
373: op.handle = xneti->xni_tx_ring_handle;
374: op.dev_bus_addr = 0;
375: err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
376: &op, 1);
377: if (err)
1.24 jym 378: aprint_error_ifnet(&xneti->xni_if,
379: "unmap_grant_ref failed: %d\n", err);
1.1 bouyer 380: }
381: if (xneti->xni_rxring.sring) {
382: op.host_addr = xneti->xni_rx_ring_va;
383: op.handle = xneti->xni_rx_ring_handle;
384: op.dev_bus_addr = 0;
385: err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
386: &op, 1);
387: if (err)
1.24 jym 388: aprint_error_ifnet(&xneti->xni_if,
389: "unmap_grant_ref failed: %d\n", err);
1.1 bouyer 390: }
391: uvm_km_free(kernel_map, xneti->xni_tx_ring_va,
392: PAGE_SIZE, UVM_KMF_VAONLY);
393: uvm_km_free(kernel_map, xneti->xni_rx_ring_va,
394: PAGE_SIZE, UVM_KMF_VAONLY);
395: free(xneti, M_DEVBUF);
396: return 0;
397: }
398:
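      /*
       * Handle a state change of the frontend: on Connected, map the
       * TX/RX shared rings granted by the frontend, bind the interdomain
       * event channel and bring the interface up; on Closing, mark the
       * instance as disconnecting.
       */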
399: static void
400: xennetback_frontend_changed(void *arg, XenbusState new_state)
401: {
402: struct xnetback_instance *xneti = arg;
403: struct xenbus_device *xbusd = xneti->xni_xbusd;
404: int err;
405: netif_tx_sring_t *tx_ring;
406: netif_rx_sring_t *rx_ring;
407: struct gnttab_map_grant_ref op;
408: evtchn_op_t evop;
409: u_long tx_ring_ref, rx_ring_ref;
410: u_long revtchn;
411:
412: XENPRINTF(("%s: new state %d\n", xneti->xni_if.if_xname, new_state));
413: switch(new_state) {
414: case XenbusStateInitialising:
415: case XenbusStateInitialised:
416: break;
417:
418: case XenbusStateConnected:
 419: 		/* read communication information */
420: err = xenbus_read_ul(NULL, xbusd->xbusd_otherend,
1.5 bouyer 421: "tx-ring-ref", &tx_ring_ref, 10);
1.1 bouyer 422: if (err) {
423: xenbus_dev_fatal(xbusd, err, "reading %s/tx-ring-ref",
424: xbusd->xbusd_otherend);
425: break;
426: }
427: err = xenbus_read_ul(NULL, xbusd->xbusd_otherend,
1.5 bouyer 428: "rx-ring-ref", &rx_ring_ref, 10);
1.1 bouyer 429: if (err) {
430: xenbus_dev_fatal(xbusd, err, "reading %s/rx-ring-ref",
431: xbusd->xbusd_otherend);
432: break;
433: }
434: err = xenbus_read_ul(NULL, xbusd->xbusd_otherend,
1.5 bouyer 435: "event-channel", &revtchn, 10);
1.1 bouyer 436: if (err) {
437: xenbus_dev_fatal(xbusd, err, "reading %s/event-channel",
438: xbusd->xbusd_otherend);
439: break;
440: }
441: /* allocate VA space and map rings */
442: xneti->xni_tx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
443: UVM_KMF_VAONLY);
1.4 bouyer 444: if (xneti->xni_tx_ring_va == 0) {
445: xenbus_dev_fatal(xbusd, ENOMEM,
446: "can't get VA for tx ring", xbusd->xbusd_otherend);
1.1 bouyer 447: break;
1.4 bouyer 448: }
1.1 bouyer 449: tx_ring = (void *)xneti->xni_tx_ring_va;
450: xneti->xni_rx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
451: UVM_KMF_VAONLY);
1.4 bouyer 452: if (xneti->xni_rx_ring_va == 0) {
453: xenbus_dev_fatal(xbusd, ENOMEM,
454: "can't get VA for rx ring", xbusd->xbusd_otherend);
1.1 bouyer 455: goto err1;
1.4 bouyer 456: }
1.1 bouyer 457: rx_ring = (void *)xneti->xni_rx_ring_va;
458: op.host_addr = xneti->xni_tx_ring_va;
459: op.flags = GNTMAP_host_map;
460: op.ref = tx_ring_ref;
461: op.dom = xneti->xni_domid;
462: err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
463: if (err || op.status) {
464: printf("%s: can't map TX grant ref: %d/%d\n",
465: xneti->xni_if.if_xname, err, op.status);
466: goto err2;
467: }
468: xneti->xni_tx_ring_handle = op.handle;
469:
470: op.host_addr = xneti->xni_rx_ring_va;
471: op.flags = GNTMAP_host_map;
472: op.ref = rx_ring_ref;
473: op.dom = xneti->xni_domid;
474: err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
475: if (err || op.status) {
476: printf("%s: can't map RX grant ref: %d/%d\n",
477: xneti->xni_if.if_xname, err, op.status);
478: goto err2;
479: }
480: xneti->xni_rx_ring_handle = op.handle;
481: BACK_RING_INIT(&xneti->xni_txring, tx_ring, PAGE_SIZE);
482: BACK_RING_INIT(&xneti->xni_rxring, rx_ring, PAGE_SIZE);
483: evop.cmd = EVTCHNOP_bind_interdomain;
484: evop.u.bind_interdomain.remote_dom = xneti->xni_domid;
485: evop.u.bind_interdomain.remote_port = revtchn;
486: err = HYPERVISOR_event_channel_op(&evop);
487: if (err) {
488: printf("%s: can't get event channel: %d\n",
489: xneti->xni_if.if_xname, err);
490: goto err2;
491: }
492: xneti->xni_evtchn = evop.u.bind_interdomain.local_port;
493: x86_sfence();
494: xneti->xni_status = CONNECTED;
495: xenbus_switch_state(xbusd, NULL, XenbusStateConnected);
496: x86_sfence();
497: event_set_handler(xneti->xni_evtchn, xennetback_evthandler,
498: xneti, IPL_NET, xneti->xni_if.if_xname);
499: xennetback_ifinit(&xneti->xni_if);
500: hypervisor_enable_event(xneti->xni_evtchn);
501: hypervisor_notify_via_evtchn(xneti->xni_evtchn);
502: break;
503:
504: case XenbusStateClosing:
505: xneti->xni_status = DISCONNECTING;
506: xneti->xni_if.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
507: xneti->xni_if.if_timer = 0;
508: xenbus_switch_state(xbusd, NULL, XenbusStateClosing);
509: break;
510:
511: case XenbusStateClosed:
512: /* otherend_changed() should handle it for us */
 513: 		panic("xennetback_frontend_changed: closed");
1.26 ! jym 514: case XenbusStateUnknown:
1.1 bouyer 515: case XenbusStateInitWait:
516: default:
517: aprint_error("%s: invalid frontend state %d\n",
518: xneti->xni_if.if_xname, new_state);
519: break;
520: }
521: return;
522: err2:
523: uvm_km_free(kernel_map, xneti->xni_rx_ring_va,
524: PAGE_SIZE, UVM_KMF_VAONLY);
525: err1:
526: uvm_km_free(kernel_map, xneti->xni_tx_ring_va,
527: PAGE_SIZE, UVM_KMF_VAONLY);
528: }
529:
530: /* lookup a xneti based on domain id and interface handle */
531: static struct xnetback_instance *
 532: xnetif_lookup(domid_t dom, uint32_t handle)
533: {
534: struct xnetback_instance *xneti;
535:
536: SLIST_FOREACH(xneti, &xnetback_instances, next) {
537: if (xneti->xni_domid == dom && xneti->xni_handle == handle)
538: return xneti;
539: }
540: return NULL;
541: }
542:
543:
 544: /* get a page to replace an mbuf cluster page given to a domain */
545: static int
546: xennetback_get_mcl_page(paddr_t *map)
547: {
548: if (mcl_pages_alloc < 0)
549: /*
550: * we exhausted our allocation. We can't allocate new ones yet
551: * because the current pages may not have been loaned to
552: * the remote domain yet. We have to let the caller do this.
553: */
554: return -1;
555:
556: *map = mcl_pages[mcl_pages_alloc] << PAGE_SHIFT;
557: mcl_pages_alloc--;
558: return 0;
560: }
561:
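      /*
       * Refill mcl_pages[] with up to NB_XMIT_PAGES_BATCH fresh machine
       * pages, obtained with a XENMEM_increase_reservation hypercall.
       */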
562: static void
563: xennetback_get_new_mcl_pages(void)
564: {
565: int nb_pages;
566: struct xen_memory_reservation res;
567:
568: /* get some new pages. */
1.25 cegger 569: xenguest_handle(res.extent_start) = mcl_pages;
1.1 bouyer 570: res.nr_extents = NB_XMIT_PAGES_BATCH;
571: res.extent_order = 0;
572: res.address_bits = 0;
573: res.domid = DOMID_SELF;
574:
575: nb_pages = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);
576: if (nb_pages <= 0) {
577: printf("xennetback: can't get new mcl pages (%d)\n", nb_pages);
578: return;
579: }
580: if (nb_pages != NB_XMIT_PAGES_BATCH)
581: printf("xennetback: got only %d new mcl pages\n", nb_pages);
582:
583: mcl_pages_alloc = nb_pages - 1;
584: }
585:
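      /*
       * Queue a response in the TX ring and notify the frontend through
       * the event channel if it asked for one.
       */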
586: static inline void
587: xennetback_tx_response(struct xnetback_instance *xneti, int id, int status)
588: {
589: RING_IDX resp_prod;
590: netif_tx_response_t *txresp;
591: int do_event;
592:
593: resp_prod = xneti->xni_txring.rsp_prod_pvt;
594: txresp = RING_GET_RESPONSE(&xneti->xni_txring, resp_prod);
595:
596: txresp->id = id;
597: txresp->status = status;
598: xneti->xni_txring.rsp_prod_pvt++;
599: RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xneti->xni_txring, do_event);
600: if (do_event) {
601: XENPRINTF(("%s send event\n", xneti->xni_if.if_xname));
602: hypervisor_notify_via_evtchn(xneti->xni_evtchn);
603: }
604: }
605:
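      /*
       * Event channel handler: consume the transmit requests posted by
       * the frontend. Each granted page is mapped and sanity-checked,
       * and the payload is copied into a fresh mbuf handed to the
       * network stack (passing the mapped page as read-only external
       * storage is disabled for now, see "notyet" below).
       */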
606: static int
607: xennetback_evthandler(void *arg)
608: {
609: struct xnetback_instance *xneti = arg;
610: struct ifnet *ifp = &xneti->xni_if;
611: netif_tx_request_t *txreq;
612: struct xni_pkt *pkt;
613: vaddr_t pkt_va;
614: struct mbuf *m;
615: int receive_pending, err;
616: RING_IDX req_cons;
617:
618: XENPRINTF(("xennetback_evthandler "));
619: req_cons = xneti->xni_txring.req_cons;
620: x86_lfence();
621: while (1) {
622: x86_lfence(); /* be sure to read the request before updating */
623: xneti->xni_txring.req_cons = req_cons;
624: x86_sfence();
625: RING_FINAL_CHECK_FOR_REQUESTS(&xneti->xni_txring,
626: receive_pending);
627: if (receive_pending == 0)
628: break;
629: txreq = RING_GET_REQUEST(&xneti->xni_txring, req_cons);
630: x86_lfence();
631: XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname,
632: txreq->size));
1.6 bouyer 633: req_cons++;
1.1 bouyer 634: if (__predict_false((ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
635: (IFF_UP | IFF_RUNNING))) {
636: /* interface not up, drop */
637: xennetback_tx_response(xneti, txreq->id,
638: NETIF_RSP_DROPPED);
639: continue;
640: }
641: /*
642: * Do some sanity checks, and map the packet's page.
643: */
644: if (__predict_false(txreq->size < ETHER_HDR_LEN ||
645: txreq->size > (ETHER_MAX_LEN - ETHER_CRC_LEN))) {
646: printf("%s: packet size %d too big\n",
647: ifp->if_xname, txreq->size);
648: xennetback_tx_response(xneti, txreq->id,
649: NETIF_RSP_ERROR);
650: ifp->if_ierrors++;
651: continue;
652: }
653: /* don't cross page boundaries */
654: if (__predict_false(
655: txreq->offset + txreq->size > PAGE_SIZE)) {
 656: 			printf("%s: packet crosses page boundary\n",
657: ifp->if_xname);
658: xennetback_tx_response(xneti, txreq->id,
659: NETIF_RSP_ERROR);
660: ifp->if_ierrors++;
661: continue;
662: }
663: /* get a mbuf for this packet */
664: MGETHDR(m, M_DONTWAIT, MT_DATA);
665: if (__predict_false(m == NULL)) {
666: static struct timeval lasttime;
667: if (ratecheck(&lasttime, &xni_pool_errintvl))
668: printf("%s: mbuf alloc failed\n",
669: ifp->if_xname);
670: xennetback_tx_response(xneti, txreq->id,
671: NETIF_RSP_DROPPED);
672: ifp->if_ierrors++;
673: continue;
674: }
675:
676: XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n",
677: xneti->xni_if.if_xname, txreq->offset,
678: txreq->size, txreq->id, MASK_NETIF_TX_IDX(req_cons)));
1.26 ! jym 679:
1.1 bouyer 680: pkt = pool_get(&xni_pkt_pool, PR_NOWAIT);
681: if (__predict_false(pkt == NULL)) {
682: static struct timeval lasttime;
683: if (ratecheck(&lasttime, &xni_pool_errintvl))
684: printf("%s: xnbpkt alloc failed\n",
685: ifp->if_xname);
686: xennetback_tx_response(xneti, txreq->id,
687: NETIF_RSP_DROPPED);
688: ifp->if_ierrors++;
689: m_freem(m);
690: continue;
691: }
1.4 bouyer 692: err = xen_shm_map(1, xneti->xni_domid, &txreq->gref, &pkt_va,
1.2 bouyer 693: &pkt->pkt_handle, XSHM_RO);
694: if (__predict_false(err == ENOMEM)) {
1.1 bouyer 695: xennetback_tx_response(xneti, txreq->id,
696: NETIF_RSP_DROPPED);
697: ifp->if_ierrors++;
698: pool_put(&xni_pkt_pool, pkt);
699: m_freem(m);
700: continue;
701: }
702:
1.2 bouyer 703: if (__predict_false(err)) {
1.1 bouyer 704: 			printf("%s: mapping foreign page failed: %d\n",
1.2 bouyer 705: xneti->xni_if.if_xname, err);
1.1 bouyer 706: xennetback_tx_response(xneti, txreq->id,
707: NETIF_RSP_ERROR);
708: ifp->if_ierrors++;
709: pool_put(&xni_pkt_pool, pkt);
710: m_freem(m);
711: continue;
712: }
713:
714: if ((ifp->if_flags & IFF_PROMISC) == 0) {
715: struct ether_header *eh =
716: (void*)(pkt_va + txreq->offset);
717: if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
1.16 dyoung 718: memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
1.1 bouyer 719: ETHER_ADDR_LEN) != 0) {
720: xni_pkt_unmap(pkt, pkt_va);
721: m_freem(m);
722: xennetback_tx_response(xneti, txreq->id,
723: NETIF_RSP_OKAY);
724: continue; /* packet is not for us */
725: }
726: }
1.3 bouyer 727: #ifdef notyet
 728: /* a lot of work is needed in the tcp stack to handle read-only ext storage,
 729:  * so always copy for now. */
1.1 bouyer 730: if (((req_cons + 1) & (NET_TX_RING_SIZE - 1)) ==
1.12 bouyer 731: (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1)))
732: #else
733: if (1)
1.3 bouyer 734: #endif /* notyet */
1.12 bouyer 735: {
1.1 bouyer 736: /*
737: * This is the last TX buffer. Copy the data and
738: * ack it. Delaying it until the mbuf is
739: * freed will stall transmit.
740: */
741: m->m_len = min(MHLEN, txreq->size);
742: m->m_pkthdr.len = 0;
743: m_copyback(m, 0, txreq->size,
1.13 christos 744: (void *)(pkt_va + txreq->offset));
1.1 bouyer 745: xni_pkt_unmap(pkt, pkt_va);
746: if (m->m_pkthdr.len < txreq->size) {
747: ifp->if_ierrors++;
748: m_freem(m);
749: xennetback_tx_response(xneti, txreq->id,
750: NETIF_RSP_DROPPED);
751: continue;
752: }
753: xennetback_tx_response(xneti, txreq->id,
754: NETIF_RSP_OKAY);
755: } else {
1.3 bouyer 756:
1.1 bouyer 757: pkt->pkt_id = txreq->id;
758: pkt->pkt_xneti = xneti;
759:
760: MEXTADD(m, pkt_va + txreq->offset,
761: txreq->size, M_DEVBUF, xennetback_tx_free, pkt);
762: m->m_pkthdr.len = m->m_len = txreq->size;
1.2 bouyer 763: m->m_flags |= M_EXT_ROMAP;
1.1 bouyer 764: }
1.10 yamt 765: if ((txreq->flags & NETTXF_csum_blank) != 0) {
766: xennet_checksum_fill(&m);
767: if (m == NULL) {
768: ifp->if_ierrors++;
769: continue;
770: }
771: }
1.1 bouyer 772: m->m_pkthdr.rcvif = ifp;
773: ifp->if_ipackets++;
774:
775: #if NBPFILTER > 0
776: if (ifp->if_bpf)
777: bpf_mtap(ifp->if_bpf, m);
778: #endif
779: (*ifp->if_input)(ifp, m);
780: }
781: x86_lfence(); /* be sure to read the request before updating pointer */
782: xneti->xni_txring.req_cons = req_cons;
783: x86_sfence();
784: /* check to see if we can transmit more packets */
1.21 ad 785: softint_schedule(xneti->xni_softintr);
1.1 bouyer 786:
787: return 1;
788: }
789:
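      /*
       * External-storage free callback, called when the stack is done
       * with an mbuf built around a foreign page: send the delayed TX
       * response, unmap the page and recycle the mbuf.
       */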
790: static void
1.13 christos 791: xennetback_tx_free(struct mbuf *m, void *va, size_t size, void *arg)
1.1 bouyer 792: {
793: int s = splnet();
794: struct xni_pkt *pkt = arg;
795: struct xnetback_instance *xneti = pkt->pkt_xneti;
796:
797: XENPRINTF(("xennetback_tx_free\n"));
798:
799: xennetback_tx_response(xneti, pkt->pkt_id, NETIF_RSP_OKAY);
800:
801: xni_pkt_unmap(pkt, (vaddr_t)va & ~PAGE_MASK);
802:
803: if (m)
1.18 ad 804: pool_cache_put(mb_cache, m);
1.1 bouyer 805: splx(s);
806: }
807:
808: static int
1.13 christos 809: xennetback_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
1.1 bouyer 810: {
811: //struct xnetback_instance *xneti = ifp->if_softc;
812: //struct ifreq *ifr = (struct ifreq *)data;
813: int s, error;
814:
815: s = splnet();
816: error = ether_ioctl(ifp, cmd, data);
817: if (error == ENETRESET)
818: error = 0;
819: splx(s);
820: return error;
821: }
822:
823: static void
824: xennetback_ifstart(struct ifnet *ifp)
825: {
826: struct xnetback_instance *xneti = ifp->if_softc;
827:
828: /*
829: * The Xen communication channel is much more efficient if we can
 830: 	 * schedule batches of packets for the domain. To achieve this, we
831: * schedule a soft interrupt, and just return. This way, the network
832: * stack will enqueue all pending mbufs in the interface's send queue
 833: 	 * before it is processed by xennetback_ifsoftstart().
834: */
1.21 ad 835: softint_schedule(xneti->xni_softintr);
1.1 bouyer 836: }
837:
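      /*
       * Soft interrupt doing the actual transmit to the guest: for each
       * mbuf, either give its cluster page away (replacing it with a
       * page from mcl_pages[]) or copy the data to a transferable page,
       * then hand all the pages to the domain in one batched
       * GNTTABOP_transfer.
       */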
838: static void
839: xennetback_ifsoftstart(void *arg)
840: {
841: struct xnetback_instance *xneti = arg;
842: struct ifnet *ifp = &xneti->xni_if;
843: struct mbuf *m;
844: vaddr_t xmit_va;
845: paddr_t xmit_pa;
846: paddr_t xmit_ma;
1.9 bouyer 847: paddr_t newp_ma = 0; /* XXX gcc */
1.1 bouyer 848: int i, j, nppitems;
849: mmu_update_t *mmup;
850: multicall_entry_t *mclp;
851: netif_rx_response_t *rxresp;
852: RING_IDX req_prod, resp_prod;
853: int do_event = 0;
854: gnttab_transfer_t *gop;
855: int id, offset;
856:
857: XENPRINTF(("xennetback_ifsoftstart "));
858: int s = splnet();
859: if (__predict_false(
860: (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) {
861: splx(s);
862: return;
863: }
864:
865: while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
866: XENPRINTF(("pkt\n"));
867: req_prod = xneti->xni_rxring.sring->req_prod;
868: resp_prod = xneti->xni_rxring.rsp_prod_pvt;
869: x86_lfence();
870:
871: mmup = xstart_mmu;
872: mclp = xstart_mcl;
873: gop = xstart_gop;
874: for (nppitems = 0, i = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) {
875: XENPRINTF(("have a packet\n"));
876: IFQ_POLL(&ifp->if_snd, m);
877: if (__predict_false(m == NULL))
878: panic("xennetback_ifstart: IFQ_POLL");
879: if (__predict_false(
880: req_prod == xneti->xni_rxring.req_cons ||
881: xneti->xni_rxring.req_cons - resp_prod ==
882: NET_RX_RING_SIZE)) {
883: /* out of ring space */
884: XENPRINTF(("xennetback_ifstart: ring full "
885: "req_prod 0x%x req_cons 0x%x resp_prod "
886: "0x%x\n",
887: req_prod, xneti->xni_rxring.req_cons,
888: resp_prod));
889: ifp->if_timer = 1;
890: break;
891: }
892: if (__predict_false(i == NB_XMIT_PAGES_BATCH))
893: break; /* we filled the array */
894: if (__predict_false(
895: xennetback_get_mcl_page(&newp_ma) != 0))
896: break; /* out of memory */
897: if ((m->m_flags & M_CLUSTER) != 0 &&
898: !M_READONLY(m) && MCLBYTES == PAGE_SIZE) {
899: /* we can give this page away */
900: xmit_pa = m->m_ext.ext_paddr;
901: xmit_ma = xpmap_ptom(xmit_pa);
902: xmit_va = (vaddr_t)m->m_ext.ext_buf;
903: KASSERT(xmit_pa != M_PADDR_INVALID);
904: KASSERT((xmit_va & PAGE_MASK) == 0);
905: offset = m->m_data - m->m_ext.ext_buf;
906: } else {
907: /* we have to copy the packet */
908: xmit_va = (vaddr_t)pool_cache_get_paddr(
1.18 ad 909: xmit_pages_cachep,
1.1 bouyer 910: PR_NOWAIT, &xmit_pa);
911: if (__predict_false(xmit_va == 0))
912: break; /* out of memory */
913:
914: KASSERT(xmit_pa != POOL_PADDR_INVALID);
915: xmit_ma = xpmap_ptom(xmit_pa);
916: XENPRINTF(("xennetback_get_xmit_page: got va "
917: "0x%x ma 0x%x\n", (u_int)xmit_va,
918: (u_int)xmit_ma));
919: m_copydata(m, 0, m->m_pkthdr.len,
1.14 dogcow 920: (char *)xmit_va + LINUX_REQUESTED_OFFSET);
1.8 bouyer 921: offset = LINUX_REQUESTED_OFFSET;
1.1 bouyer 922: pages_pool_free[nppitems].va = xmit_va;
923: pages_pool_free[nppitems].pa = xmit_pa;
924: nppitems++;
925: }
926: /* start filling ring */
927: gop->ref = RING_GET_REQUEST(&xneti->xni_rxring,
928: xneti->xni_rxring.req_cons)->gref;
929: id = RING_GET_REQUEST(&xneti->xni_rxring,
930: xneti->xni_rxring.req_cons)->id;
1.8 bouyer 931: x86_lfence();
1.1 bouyer 932: xneti->xni_rxring.req_cons++;
933: rxresp = RING_GET_RESPONSE(&xneti->xni_rxring,
934: resp_prod);
935: rxresp->id = id;
936: rxresp->offset = offset;
937: rxresp->status = m->m_pkthdr.len;
1.11 yamt 938: if ((m->m_pkthdr.csum_flags &
939: (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
940: rxresp->flags = NETRXF_csum_blank;
941: } else {
942: rxresp->flags = 0;
943: }
1.1 bouyer 944: /*
 945: 			 * transfer the page containing the packet to the
946: * remote domain, and map newp in place.
947: */
948: xpmap_phys_to_machine_mapping[
949: (xmit_pa - XPMAP_OFFSET) >> PAGE_SHIFT] =
950: newp_ma >> PAGE_SHIFT;
951: MULTI_update_va_mapping(mclp, xmit_va,
952: newp_ma | PG_V | PG_RW | PG_U | PG_M, 0);
953: mclp++;
954: gop->mfn = xmit_ma >> PAGE_SHIFT;
955: gop->domid = xneti->xni_domid;
956: gop++;
957:
958: mmup->ptr = newp_ma | MMU_MACHPHYS_UPDATE;
959: mmup->val = (xmit_pa - XPMAP_OFFSET) >> PAGE_SHIFT;
960: mmup++;
961:
962: /* done with this packet */
963: IFQ_DEQUEUE(&ifp->if_snd, m);
964: mbufs_sent[i] = m;
1.8 bouyer 965: resp_prod++;
1.1 bouyer 966: i++; /* this packet has been queued */
967: ifp->if_opackets++;
968: #if NBPFILTER > 0
969: if (ifp->if_bpf)
970: bpf_mtap(ifp->if_bpf, m);
971: #endif
972: }
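      		/*
      		 * Push the batch we just built: flip the P2M entries and
      		 * remap our VAs in one multicall together with the
      		 * machphys updates, then transfer the old pages to the
      		 * guest.
      		 */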
973: if (i != 0) {
974: /*
975: * We may have allocated buffers which have entries
976: * outstanding in the page update queue -- make sure
977: * we flush those first!
978: */
979: int svm = splvm();
980: xpq_flush_queue();
981: splx(svm);
982: mclp[-1].args[MULTI_UVMFLAGS_INDEX] =
983: UVMF_TLB_FLUSH|UVMF_ALL;
984: mclp->op = __HYPERVISOR_mmu_update;
985: mclp->args[0] = (unsigned long)xstart_mmu;
986: mclp->args[1] = i;
987: mclp->args[2] = 0;
988: mclp->args[3] = DOMID_SELF;
989: mclp++;
990: /* update the MMU */
991: if (HYPERVISOR_multicall(xstart_mcl, i + 1) != 0) {
1.26 ! jym 992: panic("%s: HYPERVISOR_multicall failed",
1.1 bouyer 993: ifp->if_xname);
994: }
995: for (j = 0; j < i + 1; j++) {
1.8 bouyer 996: if (xstart_mcl[j].result != 0) {
997: printf("%s: xstart_mcl[%d] "
998: "failed (%lu)\n", ifp->if_xname,
999: j, xstart_mcl[j].result);
1000: printf("%s: req_prod %u req_cons "
1001: "%u rsp_prod %u rsp_prod_pvt %u "
1002: "i %u\n",
1003: ifp->if_xname,
1004: xneti->xni_rxring.sring->req_prod,
1005: xneti->xni_rxring.req_cons,
1006: xneti->xni_rxring.sring->rsp_prod,
1007: xneti->xni_rxring.rsp_prod_pvt,
1008: i);
1009: }
1.1 bouyer 1010: }
1011: if (HYPERVISOR_grant_table_op(GNTTABOP_transfer,
1012: xstart_gop, i) != 0) {
1.26 ! jym 1013: panic("%s: GNTTABOP_transfer failed",
1.1 bouyer 1014: ifp->if_xname);
1015: }
1016:
1017: for (j = 0; j < i; j++) {
1.26 ! jym 1018: if (xstart_gop[j].status != GNTST_okay) {
1.8 bouyer 1019: printf("%s GNTTABOP_transfer[%d] %d\n",
1020: ifp->if_xname,
1.1 bouyer 1021: j, xstart_gop[j].status);
1.8 bouyer 1022: printf("%s: req_prod %u req_cons "
1023: "%u rsp_prod %u rsp_prod_pvt %u "
1024: "i %d\n",
1025: ifp->if_xname,
1026: xneti->xni_rxring.sring->req_prod,
1027: xneti->xni_rxring.req_cons,
1028: xneti->xni_rxring.sring->rsp_prod,
1029: xneti->xni_rxring.rsp_prod_pvt,
1030: i);
1031: rxresp = RING_GET_RESPONSE(
1032: &xneti->xni_rxring,
1033: xneti->xni_rxring.rsp_prod_pvt + j);
1.1 bouyer 1034: rxresp->status = NETIF_RSP_ERROR;
1035: }
1036: }
1037:
1038: /* update pointer */
1.8 bouyer 1039: KASSERT(
1040: xneti->xni_rxring.rsp_prod_pvt + i == resp_prod);
1.1 bouyer 1041: xneti->xni_rxring.rsp_prod_pvt = resp_prod;
1042: RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
1043: &xneti->xni_rxring, j);
1044: if (j)
1045: do_event = 1;
1046: /* now we can free the mbufs */
1047: for (j = 0; j < i; j++) {
1048: m_freem(mbufs_sent[j]);
1049: }
1050: for (j = 0; j < nppitems; j++) {
1.18 ad 1051: pool_cache_put_paddr(xmit_pages_cachep,
1.1 bouyer 1052: (void *)pages_pool_free[j].va,
1053: pages_pool_free[j].pa);
1054: }
1055: }
1056: /* send event */
1057: if (do_event) {
1058: x86_lfence();
1059: XENPRINTF(("%s receive event\n",
1060: xneti->xni_if.if_xname));
1061: hypervisor_notify_via_evtchn(xneti->xni_evtchn);
1062: do_event = 0;
1063: }
1064: /* check if we need to get back some pages */
1065: if (mcl_pages_alloc < 0) {
1066: xennetback_get_new_mcl_pages();
1067: if (mcl_pages_alloc < 0) {
1068: /*
1.26 ! jym 1069: 				 * set up the watchdog to try again, because
1.1 bouyer 1070: 				 * xennetback_ifstart() will never be called
 1071: 				 * again if the queue is full.
1072: */
1073: printf("xennetback_ifstart: no mcl_pages\n");
1074: ifp->if_timer = 1;
1075: break;
1076: }
1077: }
1.8 bouyer 1078: /*
1079: * note that we don't use RING_FINAL_CHECK_FOR_REQUESTS()
1080: * here, as the frontend doesn't notify when adding
1081: * requests anyway
1082: */
1.1 bouyer 1083: if (__predict_false(
1.8 bouyer 1084: !RING_HAS_UNCONSUMED_REQUESTS(&xneti->xni_rxring))) {
1.1 bouyer 1085: /* ring full */
1086: break;
1087: }
1088: }
1089: splx(s);
1090: }
1091:
1092:
1093: static void
 1094: xennetback_ifwatchdog(struct ifnet *ifp)
1095: {
1096: /*
1.26 ! jym 1097: 	 * We can get into the following situation:
1.1 bouyer 1098: 	 * transmit stalls because the ring is full while the ifq is full too.
 1099: 	 * In this case (as, unfortunately, we don't get an interrupt from xen
 1100: 	 * on transmit) nothing will ever call xennetback_ifstart() again.
1101: * Here we abuse the watchdog to get out of this condition.
1102: */
1103: XENPRINTF(("xennetback_ifwatchdog\n"));
1104: xennetback_ifstart(ifp);
1105: }
1106:
1107:
1108: static int
1109: xennetback_ifinit(struct ifnet *ifp)
1110: {
1111: struct xnetback_instance *xneti = ifp->if_softc;
1112: int s = splnet();
1113:
1114: if ((ifp->if_flags & IFF_UP) == 0) {
1115: splx(s);
1116: return 0;
1117: }
1118: if (xneti->xni_status == CONNECTED)
1119: ifp->if_flags |= IFF_RUNNING;
1120: splx(s);
1121: return 0;
1122: }
1123:
1124: static void
1125: xennetback_ifstop(struct ifnet *ifp, int disable)
1126: {
1127: struct xnetback_instance *xneti = ifp->if_softc;
1128: int s = splnet();
1129:
1130: ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1131: ifp->if_timer = 0;
1132: if (xneti->xni_status == CONNECTED) {
 1133: 		XENPRINTF(("%s: req_prod 0x%x rsp_prod 0x%x req_cons 0x%x "
 1134: 		    "rsp_event 0x%x\n", ifp->if_xname,
 1135: 		    xneti->xni_txring.sring->req_prod,
 1136: 		    xneti->xni_txring.sring->rsp_prod,
      		    xneti->xni_txring.req_cons,
 1137: 		    xneti->xni_txring.sring->rsp_event));
1137: xennetback_evthandler(ifp->if_softc); /* flush pending RX requests */
1138: }
1139: splx(s);
1140: }