Annotation of src/sys/arch/xen/xen/if_xennet_xenbus.c, Revision 1.10.4.8
1.10.4.8! yamt 1: /* $NetBSD: if_xennet_xenbus.c,v 1.10.4.7 2008/02/04 09:22:57 yamt Exp $ */
1.10.4.2 yamt 2:
3: /*
4: * Copyright (c) 2006 Manuel Bouyer.
5: *
6: * Redistribution and use in source and binary forms, with or without
7: * modification, are permitted provided that the following conditions
8: * are met:
9: * 1. Redistributions of source code must retain the above copyright
10: * notice, this list of conditions and the following disclaimer.
11: * 2. Redistributions in binary form must reproduce the above copyright
12: * notice, this list of conditions and the following disclaimer in the
13: * documentation and/or other materials provided with the distribution.
14: * 3. All advertising materials mentioning features or use of this software
15: * must display the following acknowledgement:
16: * This product includes software developed by Manuel Bouyer.
17: * 4. The name of the author may not be used to endorse or promote products
18: * derived from this software without specific prior written permission.
19: *
20: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30: *
31: */
32:
33: /*
34: * Copyright (c) 2004 Christian Limpach.
35: * All rights reserved.
36: *
37: * Redistribution and use in source and binary forms, with or without
38: * modification, are permitted provided that the following conditions
39: * are met:
40: * 1. Redistributions of source code must retain the above copyright
41: * notice, this list of conditions and the following disclaimer.
42: * 2. Redistributions in binary form must reproduce the above copyright
43: * notice, this list of conditions and the following disclaimer in the
44: * documentation and/or other materials provided with the distribution.
45: * 3. All advertising materials mentioning features or use of this software
46: * must display the following acknowledgement:
47: * This product includes software developed by Christian Limpach.
48: * 4. The name of the author may not be used to endorse or promote products
49: * derived from this software without specific prior written permission.
50: *
51: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61: */
62:
63: #include <sys/cdefs.h>
1.10.4.8! yamt 64: __KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.10.4.7 2008/02/04 09:22:57 yamt Exp $");
1.10.4.2 yamt 65:
66: #include "opt_xen.h"
67: #include "opt_nfs_boot.h"
68: #include "rnd.h"
69: #include "bpfilter.h"
70:
71: #include <sys/param.h>
72: #include <sys/device.h>
73: #include <sys/conf.h>
74: #include <sys/kernel.h>
1.10.4.5 yamt 75: #include <sys/proc.h>
1.10.4.2 yamt 76: #include <sys/systm.h>
1.10.4.7 yamt 77: #include <sys/intr.h>
1.10.4.2 yamt 78: #if NRND > 0
79: #include <sys/rnd.h>
80: #endif
81:
82: #include <net/if.h>
83: #include <net/if_dl.h>
84: #include <net/if_ether.h>
85: #if NBPFILTER > 0
86: #include <net/bpf.h>
87: #include <net/bpfdesc.h>
88: #endif
89:
90: #if defined(NFS_BOOT_BOOTSTATIC)
91: #include <sys/fstypes.h>
92: #include <sys/mount.h>
93: #include <sys/statvfs.h>
94: #include <netinet/in.h>
95: #include <nfs/rpcv2.h>
96: #include <nfs/nfsproto.h>
97: #include <nfs/nfs.h>
98: #include <nfs/nfsmount.h>
99: #include <nfs/nfsdiskless.h>
1.10.4.7 yamt 100: #include <xen/if_xennetvar.h>
1.10.4.2 yamt 101: #endif /* defined(NFS_BOOT_BOOTSTATIC) */
102:
1.10.4.7 yamt 103: #include <xen/xennet_checksum.h>
1.10.4.3 yamt 104:
1.10.4.2 yamt 105: #include <uvm/uvm.h>
106:
1.10.4.7 yamt 107: #include <xen/xen3-public/io/ring.h>
1.10.4.2 yamt 108:
1.10.4.7 yamt 109: #include <xen/granttables.h>
110: #include <xen/xenbus.h>
1.10.4.2 yamt 111: #include "locators.h"
112:
113: #undef XENNET_DEBUG_DUMP
114: #undef XENNET_DEBUG
115: #ifdef XENNET_DEBUG
116: #define XEDB_FOLLOW 0x01
117: #define XEDB_INIT 0x02
118: #define XEDB_EVENT 0x04
119: #define XEDB_MBUF 0x08
120: #define XEDB_MEM 0x10
121: int xennet_debug = 0xff;
122: #define DPRINTF(x) if (xennet_debug) printf x;
123: #define DPRINTFN(n,x) if (xennet_debug & (n)) printf x;
124: #else
125: #define DPRINTF(x)
126: #define DPRINTFN(n,x)
127: #endif
128:
129: #define GRANT_INVALID_REF -1 /* entry is free */
130: #define GRANT_STACK_REF -2 /* entry owned by the network stack */
131:
132: #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
133: #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
134:
135: struct xennet_txreq {
136: SLIST_ENTRY(xennet_txreq) txreq_next;
137: uint16_t txreq_id; /* ID passed to backed */
138: grant_ref_t txreq_gntref; /* grant ref of this request */
139: struct mbuf *txreq_m; /* mbuf being transmitted */
140: };
141:
142: struct xennet_rxreq {
143: SLIST_ENTRY(xennet_rxreq) rxreq_next;
144: uint16_t rxreq_id; /* ID passed to backed */
145: grant_ref_t rxreq_gntref; /* grant ref of this request */
146: /* va/pa for this receive buf. ma will be provided by backend */
147: paddr_t rxreq_pa;
148: vaddr_t rxreq_va;
149: struct xennet_xenbus_softc *rxreq_sc; /* pointer to our interface */
150: };
151:
152: struct xennet_xenbus_softc {
153: struct device sc_dev;
154: struct ethercom sc_ethercom;
155: uint8_t sc_enaddr[6];
156: struct xenbus_device *sc_xbusd;
157:
158: netif_tx_front_ring_t sc_tx_ring;
159: netif_rx_front_ring_t sc_rx_ring;
160:
161: unsigned int sc_evtchn;
162: void *sc_softintr;
163:
164: grant_ref_t sc_tx_ring_gntref;
165: grant_ref_t sc_rx_ring_gntref;
166:
167: struct xennet_txreq sc_txreqs[NET_TX_RING_SIZE];
168: struct xennet_rxreq sc_rxreqs[NET_RX_RING_SIZE];
169: SLIST_HEAD(,xennet_txreq) sc_txreq_head; /* list of free TX requests */
170: SLIST_HEAD(,xennet_rxreq) sc_rxreq_head; /* list of free RX requests */
171: int sc_free_rxreql; /* number of free receive request struct */
172:
173: int sc_backend_status; /* our status with backend */
174: #define BEST_CLOSED 0
175: #define BEST_DISCONNECTED 1
176: #define BEST_CONNECTED 2
177: #if NRND > 0
178: rndsource_element_t sc_rnd_source;
179: #endif
180: };
1.10.4.3 yamt 181: #define SC_NLIVEREQ(sc) ((sc)->sc_rx_ring.req_prod_pvt - \
182: (sc)->sc_rx_ring.sring->rsp_prod)
1.10.4.2 yamt 183:
184: /* too big to be on stack */
185: static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
1.10.4.8! yamt 186: static u_long xennet_pages[NET_RX_RING_SIZE];
1.10.4.2 yamt 187:
188: static int xennet_xenbus_match(struct device *, struct cfdata *, void *);
189: static void xennet_xenbus_attach(struct device *, struct device *, void *);
190: static int xennet_xenbus_detach(struct device *, int);
191: static void xennet_backend_changed(void *, XenbusState);
192:
193: static int xennet_xenbus_resume(void *);
194: static void xennet_alloc_rx_buffer(struct xennet_xenbus_softc *);
195: static void xennet_free_rx_buffer(struct xennet_xenbus_softc *);
196: static void xennet_tx_complete(struct xennet_xenbus_softc *);
1.10.4.5 yamt 197: static void xennet_rx_mbuf_free(struct mbuf *, void *, size_t, void *);
1.10.4.2 yamt 198: static int xennet_handler(void *);
199: #ifdef XENNET_DEBUG_DUMP
200: static void xennet_hex_dump(const unsigned char *, size_t, const char *, int);
201: #endif
202:
203: static int xennet_init(struct ifnet *);
204: static void xennet_stop(struct ifnet *, int);
205: static void xennet_reset(struct xennet_xenbus_softc *);
206: static void xennet_softstart(void *);
207: static void xennet_start(struct ifnet *);
1.10.4.5 yamt 208: static int xennet_ioctl(struct ifnet *, u_long, void *);
1.10.4.2 yamt 209: static void xennet_watchdog(struct ifnet *);
210:
211: CFATTACH_DECL(xennet_xenbus, sizeof(struct xennet_xenbus_softc),
212: xennet_xenbus_match, xennet_xenbus_attach, xennet_xenbus_detach, NULL);
213:
214: static int
215: xennet_xenbus_match(struct device *parent, struct cfdata *match, void *aux)
216: {
217: struct xenbusdev_attach_args *xa = aux;
218:
219: if (strcmp(xa->xa_type, "vif") != 0)
220: return 0;
221:
222: if (match->cf_loc[XENBUSCF_ID] != XENBUSCF_ID_DEFAULT &&
223: match->cf_loc[XENBUSCF_ID] != xa->xa_id)
224: return 0;
225:
226: return 1;
227: }
228:
229: static void
230: xennet_xenbus_attach(struct device *parent, struct device *self, void *aux)
231: {
232: struct xennet_xenbus_softc *sc = (void *)self;
233: struct xenbusdev_attach_args *xa = aux;
234: struct ifnet *ifp = &sc->sc_ethercom.ec_if;
235: int err;
236: RING_IDX i;
237: char *val, *e, *p;
238: int s;
239: extern int ifqmaxlen; /* XXX */
240: #ifdef XENNET_DEBUG
241: char **dir;
242: int dir_n = 0;
243: char id_str[20];
244: #endif
245:
246: aprint_normal(": Xen Virtual Network Interface\n");
247: #ifdef XENNET_DEBUG
248: printf("path: %s\n", xa->xa_xbusd->xbusd_path);
249: snprintf(id_str, sizeof(id_str), "%d", xa->xa_id);
250: err = xenbus_directory(NULL, "device/vif", id_str, &dir_n, &dir);
251: if (err) {
252: printf("%s: xenbus_directory err %d\n",
253: sc->sc_dev.dv_xname, err);
254: } else {
255: printf("%s/\n", xa->xa_xbusd->xbusd_path);
256: for (i = 0; i < dir_n; i++) {
257: printf("\t/%s", dir[i]);
258: err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, dir[i],
259: NULL, &val);
260: if (err) {
261: printf("%s: xenbus_read err %d\n",
262: sc->sc_dev.dv_xname, err);
263: } else {
264: printf(" = %s\n", val);
265: free(val, M_DEVBUF);
266: }
267: }
268: }
269: #endif /* XENNET_DEBUG */
270: sc->sc_xbusd = xa->xa_xbusd;
271: sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;
272:
273: /* initialize free RX and RX request lists */
274: SLIST_INIT(&sc->sc_txreq_head);
275: for (i = 0; i < NET_TX_RING_SIZE; i++) {
276: sc->sc_txreqs[i].txreq_id = i;
277: SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i],
278: txreq_next);
279: }
280: SLIST_INIT(&sc->sc_rxreq_head);
281: s = splvm();
282: for (i = 0; i < NET_RX_RING_SIZE; i++) {
283: struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
284: rxreq->rxreq_id = i;
285: rxreq->rxreq_sc = sc;
286: rxreq->rxreq_va = uvm_km_alloc(kernel_map,
287: PAGE_SIZE, PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO);
288: if (rxreq->rxreq_va == 0)
289: break;
290: if (!pmap_extract(pmap_kernel(), rxreq->rxreq_va,
291: &rxreq->rxreq_pa))
292: panic("xennet: no pa for mapped va ?");
293: rxreq->rxreq_gntref = GRANT_INVALID_REF;
294: SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next);
295: }
296: splx(s);
297: sc->sc_free_rxreql = i;
298: if (sc->sc_free_rxreql == 0) {
299: aprint_error("%s: failed to allocate rx memory\n",
300: sc->sc_dev.dv_xname);
301: return;
302: }
303:
304: /* read mac address */
305: err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, "mac", NULL, &val);
306: if (err) {
307: aprint_error("%s: can't read mac address, err %d\n",
308: sc->sc_dev.dv_xname, err);
309: return;
310: }
311: /* read mac address */
312: for (i = 0, p = val; i < 6; i++) {
313: sc->sc_enaddr[i] = strtoul(p, &e, 16);
314: if ((e[0] == '\0' && i != 5) && e[0] != ':') {
315: aprint_error("%s: %s is not a valid mac address\n",
316: sc->sc_dev.dv_xname, val);
317: free(val, M_DEVBUF);
318: return;
319: }
320: p = &e[1];
321: }
322: free(val, M_DEVBUF);
323: aprint_normal("%s: MAC address %s\n", sc->sc_dev.dv_xname,
324: ether_sprintf(sc->sc_enaddr));
325: /* Initialize ifnet structure and attach interface */
326: memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
327: ifp->if_softc = sc;
328: ifp->if_start = xennet_start;
329: ifp->if_ioctl = xennet_ioctl;
330: ifp->if_watchdog = xennet_watchdog;
331: ifp->if_init = xennet_init;
332: ifp->if_stop = xennet_stop;
333: ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
334: ifp->if_timer = 0;
335: ifp->if_snd.ifq_maxlen = max(ifqmaxlen, NET_TX_RING_SIZE * 2);
1.10.4.3 yamt 336: ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
1.10.4.2 yamt 337: IFQ_SET_READY(&ifp->if_snd);
338: if_attach(ifp);
339: ether_ifattach(ifp, sc->sc_enaddr);
1.10.4.7 yamt 340: sc->sc_softintr = softint_establish(SOFTINT_NET, xennet_softstart, sc);
1.10.4.2 yamt 341: if (sc->sc_softintr == NULL)
342: panic(" xennet: can't establish soft interrupt");
343:
344: /* initialise shared structures and tell backend that we are ready */
345: xennet_xenbus_resume(sc);
346: }
347:
348: static int
349: xennet_xenbus_detach(struct device *self, int flags)
350: {
351: struct xennet_xenbus_softc *sc = (void *)self;
352: struct ifnet *ifp = &sc->sc_ethercom.ec_if;
353: int s0, s1;
354: RING_IDX i;
355:
356: DPRINTF(("%s: xennet_xenbus_detach\n", sc->sc_dev.dv_xname));
357: s0 = splnet();
358: xennet_stop(ifp, 1);
359: /* wait for pending TX to complete, and collect pending RX packets */
360: xennet_handler(sc);
361: while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
362: tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2);
363: xennet_handler(sc);
364: }
365: xennet_free_rx_buffer(sc);
366:
367: s1 = splvm();
368: for (i = 0; i < NET_RX_RING_SIZE; i++) {
369: struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
370: uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE,
371: UVM_KMF_WIRED);
372: }
373: splx(s1);
374:
375: ether_ifdetach(ifp);
376: if_detach(ifp);
377: while (xengnt_status(sc->sc_tx_ring_gntref)) {
378: tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2);
379: }
380: xengnt_revoke_access(sc->sc_tx_ring_gntref);
381: uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
382: UVM_KMF_WIRED);
383: while (xengnt_status(sc->sc_rx_ring_gntref)) {
384: tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2);
385: }
386: xengnt_revoke_access(sc->sc_rx_ring_gntref);
387: uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
388: UVM_KMF_WIRED);
1.10.4.7 yamt 389: softint_disestablish(sc->sc_softintr);
1.10.4.2 yamt 390: event_remove_handler(sc->sc_evtchn, &xennet_handler, sc);
391: splx(s0);
392: DPRINTF(("%s: xennet_xenbus_detach done\n", sc->sc_dev.dv_xname));
393: return 0;
394: }
395:
396: static int
397: xennet_xenbus_resume(void *p)
398: {
399: struct xennet_xenbus_softc *sc = p;
400: struct xenbus_transaction *xbt;
401: int error;
402: netif_tx_sring_t *tx_ring;
403: netif_rx_sring_t *rx_ring;
404: paddr_t ma;
405: const char *errmsg;
406:
407: sc->sc_tx_ring_gntref = GRANT_INVALID_REF;
408: sc->sc_rx_ring_gntref = GRANT_INVALID_REF;
409:
410:
411: /* setup device: alloc event channel and shared rings */
412: tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
413: UVM_KMF_WIRED | UVM_KMF_ZERO);
414: rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
415: UVM_KMF_WIRED | UVM_KMF_ZERO);
416: if (tx_ring == NULL || rx_ring == NULL)
417: panic("xennet_xenbus_resume: can't alloc rings");
418:
419: SHARED_RING_INIT(tx_ring);
420: FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE);
421: SHARED_RING_INIT(rx_ring);
422: FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE);
423:
424: (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma);
425: error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref);
426: if (error)
427: return error;
428: (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma);
429: error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref);
430: if (error)
431: return error;
432: error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn);
433: if (error)
434: return error;
435: aprint_verbose("%s: using event channel %d\n",
436: sc->sc_dev.dv_xname, sc->sc_evtchn);
437: event_set_handler(sc->sc_evtchn, &xennet_handler, sc,
438: IPL_NET, sc->sc_dev.dv_xname);
439:
440: again:
441: xbt = xenbus_transaction_start();
442: if (xbt == NULL)
443: return ENOMEM;
444: error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
445: "tx-ring-ref","%u", sc->sc_tx_ring_gntref);
446: if (error) {
447: errmsg = "writing tx ring-ref";
448: goto abort_transaction;
449: }
450: error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
451: "rx-ring-ref","%u", sc->sc_rx_ring_gntref);
452: if (error) {
453: errmsg = "writing rx ring-ref";
454: goto abort_transaction;
455: }
456: error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
457: "event-channel", "%u", sc->sc_evtchn);
458: if (error) {
459: errmsg = "writing event channel";
460: goto abort_transaction;
461: }
462: error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
463: "state", "%d", XenbusStateConnected);
464: if (error) {
465: errmsg = "writing frontend XenbusStateConnected";
466: goto abort_transaction;
467: }
468: error = xenbus_transaction_end(xbt, 0);
469: if (error == EAGAIN)
470: goto again;
471: if (error) {
472: xenbus_dev_fatal(sc->sc_xbusd, error, "completing transaction");
473: return -1;
474: }
475: xennet_alloc_rx_buffer(sc);
476: sc->sc_backend_status = BEST_CONNECTED;
477: return 0;
478:
479: abort_transaction:
480: xenbus_transaction_end(xbt, 1);
481: xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg);
482: return error;
483: }
484:
485: static void xennet_backend_changed(void *arg, XenbusState new_state)
486: {
487: struct xennet_xenbus_softc *sc = arg;
488: DPRINTF(("%s: new backend state %d\n", sc->sc_dev.dv_xname, new_state));
489:
490: switch (new_state) {
491: case XenbusStateInitialising:
492: case XenbusStateInitWait:
493: case XenbusStateInitialised:
494: break;
495: case XenbusStateClosing:
496: sc->sc_backend_status = BEST_CLOSED;
497: xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed);
498: break;
499: case XenbusStateConnected:
500: break;
501: case XenbusStateUnknown:
502: default:
503: panic("bad backend state %d", new_state);
504: }
505: }
506:
507: static void
508: xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
509: {
510: RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt;
511: RING_IDX i;
512: struct xennet_rxreq *req;
513: struct xen_memory_reservation reservation;
514: int s1, s2;
515: paddr_t pfn;
516:
517: s1 = splnet();
518: for (i = 0; sc->sc_free_rxreql != 0; i++) {
519: req = SLIST_FIRST(&sc->sc_rxreq_head);
520: KASSERT(req != NULL);
1.10.4.3 yamt 521: KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]);
1.10.4.2 yamt 522: RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
523: req->rxreq_id;
524: if (xengnt_grant_transfer(sc->sc_xbusd->xbusd_otherend_id,
525: &req->rxreq_gntref) != 0) {
526: break;
527: }
528: RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
529: req->rxreq_gntref;
530:
531: SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next);
532: sc->sc_free_rxreql--;
533:
534: /* unmap the page */
535: MULTI_update_va_mapping(&rx_mcl[i], req->rxreq_va, 0, 0);
536: /*
537: * Remove this page from pseudo phys map before
538: * passing back to Xen.
539: */
540: pfn = (req->rxreq_pa - XPMAP_OFFSET) >> PAGE_SHIFT;
541: xennet_pages[i] = xpmap_phys_to_machine_mapping[pfn];
542: xpmap_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
543: }
544: if (i == 0) {
545: splx(s1);
546: return;
547: }
548: /* also make sure to flush all TLB entries */
549: rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
550: /*
551: * We may have allocated buffers which have entries
552: * outstanding in the page update queue -- make sure we flush
553: * those first!
554: */
555: s2 = splvm();
556: xpq_flush_queue();
557: splx(s2);
558: /* now decrease reservation */
559: reservation.extent_start = xennet_pages;
560: reservation.nr_extents = i;
561: reservation.extent_order = 0;
562: reservation.address_bits = 0;
563: reservation.domid = DOMID_SELF;
564: rx_mcl[i].op = __HYPERVISOR_memory_op;
565: rx_mcl[i].args[0] = XENMEM_decrease_reservation;
566: rx_mcl[i].args[1] = (unsigned long)&reservation;
567: HYPERVISOR_multicall(rx_mcl, i+1);
568: if (__predict_false(rx_mcl[i].result != i)) {
569: panic("xennet_alloc_rx_buffer: XENMEM_decrease_reservation");
570: }
571: sc->sc_rx_ring.req_prod_pvt = req_prod + i;
572: RING_PUSH_REQUESTS(&sc->sc_rx_ring);
573:
574: splx(s1);
575: return;
576: }
577:
578: static void
579: xennet_free_rx_buffer(struct xennet_xenbus_softc *sc)
580: {
581: paddr_t ma, pa;
582: vaddr_t va;
583: RING_IDX i;
584: mmu_update_t mmu[1];
585: multicall_entry_t mcl[2];
586:
587: int s = splbio();
588:
589: DPRINTF(("%s: xennet_free_rx_buffer\n", sc->sc_dev.dv_xname));
590: /* get back memory from RX ring */
591: for (i = 0; i < NET_RX_RING_SIZE; i++) {
592: struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
593:
594: /*
595: * if the buffer is in transit in the network stack, wait for
596: * the network stack to free it.
597: */
598: while ((volatile grant_ref_t)rxreq->rxreq_gntref ==
599: GRANT_STACK_REF)
600: tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2);
601:
602: if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
603: /*
604: * this req is still granted. Get back the page or
605: * allocate a new one, and remap it.
606: */
607: SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
608: rxreq_next);
609: sc->sc_free_rxreql++;
610: ma = xengnt_revoke_transfer(rxreq->rxreq_gntref);
611: rxreq->rxreq_gntref = GRANT_INVALID_REF;
612: if (ma == 0) {
1.10.4.8! yamt 613: u_long pfn;
1.10.4.2 yamt 614: struct xen_memory_reservation xenres;
615: /*
616: * transfer not complete, we lost the page.
617: * Get one from hypervisor
618: */
1.10.4.8! yamt 619: xenres.extent_start = &pfn;
1.10.4.2 yamt 620: xenres.nr_extents = 1;
621: xenres.extent_order = 0;
622: xenres.address_bits = 31;
623: xenres.domid = DOMID_SELF;
624: if (HYPERVISOR_memory_op(
625: XENMEM_increase_reservation, &xenres) < 0) {
626: panic("xennet_free_rx_buffer: "
627: "can't get memory back");
628: }
1.10.4.8! yamt 629: ma = pfn;
1.10.4.2 yamt 630: KASSERT(ma != 0);
631: }
632: pa = rxreq->rxreq_pa;
633: va = rxreq->rxreq_va;
634: /* remap the page */
635: mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
636: mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
637: MULTI_update_va_mapping(&mcl[0], va,
638: (ma << PAGE_SHIFT) | PG_V | PG_KW,
639: UVMF_TLB_FLUSH|UVMF_ALL);
640: xpmap_phys_to_machine_mapping[
641: (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
642: mcl[1].op = __HYPERVISOR_mmu_update;
643: mcl[1].args[0] = (unsigned long)mmu;
644: mcl[1].args[1] = 1;
645: mcl[1].args[2] = 0;
646: mcl[1].args[3] = DOMID_SELF;
647: HYPERVISOR_multicall(mcl, 2);
648: }
649:
650: }
651: splx(s);
652: DPRINTF(("%s: xennet_free_rx_buffer done\n", sc->sc_dev.dv_xname));
653: }
654:
655: static void
1.10.4.5 yamt 656: xennet_rx_mbuf_free(struct mbuf *m, void *buf, size_t size, void *arg)
1.10.4.2 yamt 657: {
658: struct xennet_rxreq *req = arg;
659: struct xennet_xenbus_softc *sc = req->rxreq_sc;
660:
1.10.4.3 yamt 661: int s = splnet();
662:
1.10.4.2 yamt 663: SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
664: sc->sc_free_rxreql++;
665:
666: req->rxreq_gntref = GRANT_INVALID_REF;
1.10.4.3 yamt 667: if (sc->sc_free_rxreql >= SC_NLIVEREQ(sc) &&
1.10.4.2 yamt 668: __predict_true(sc->sc_backend_status == BEST_CONNECTED)) {
669: xennet_alloc_rx_buffer(sc);
670: }
671:
672: if (m)
1.10.4.6 yamt 673: pool_cache_put(mb_cache, m);
1.10.4.3 yamt 674: splx(s);
1.10.4.2 yamt 675: }
676:
677:
678: static void
679: xennet_tx_complete(struct xennet_xenbus_softc *sc)
680: {
681: struct xennet_txreq *req;
682: struct ifnet *ifp = &sc->sc_ethercom.ec_if;
683: RING_IDX resp_prod, i;
684:
685: DPRINTFN(XEDB_EVENT, ("xennet_tx_complete prod %d cons %d\n",
686: sc->sc_tx_ring.sring->rsp_prod, sc->sc_tx_ring.rsp_cons));
687:
688: again:
689: resp_prod = sc->sc_tx_ring.sring->rsp_prod;
690: x86_lfence();
691: for (i = sc->sc_tx_ring.rsp_cons; i != resp_prod; i++) {
692: req = &sc->sc_txreqs[RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id];
693: KASSERT(req->txreq_id ==
694: RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id);
695: if (__predict_false(xengnt_status(req->txreq_gntref))) {
696: printf("%s: grant still used by backend\n",
697: sc->sc_dev.dv_xname);
698: sc->sc_tx_ring.rsp_cons = i;
699: goto end;
700: }
701: if (__predict_false(
702: RING_GET_RESPONSE(&sc->sc_tx_ring, i)->status !=
703: NETIF_RSP_OKAY))
704: ifp->if_oerrors++;
705: else
706: ifp->if_opackets++;
707: xengnt_revoke_access(req->txreq_gntref);
708: m_freem(req->txreq_m);
709: SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
710: }
711: sc->sc_tx_ring.rsp_cons = resp_prod;
712: /* set new event and check fopr race with rsp_cons update */
713: sc->sc_tx_ring.sring->rsp_event =
714: resp_prod + ((sc->sc_tx_ring.sring->req_prod - resp_prod) >> 1) + 1;
715: ifp->if_timer = 0;
716: x86_sfence();
717: if (resp_prod != sc->sc_tx_ring.sring->rsp_prod)
718: goto again;
719: end:
720: if (ifp->if_flags & IFF_OACTIVE) {
721: ifp->if_flags &= ~IFF_OACTIVE;
722: xennet_softstart(sc);
723: }
724: }
725:
726: static int
727: xennet_handler(void *arg)
728: {
729: struct xennet_xenbus_softc *sc = arg;
730: struct ifnet *ifp = &sc->sc_ethercom.ec_if;
731: RING_IDX resp_prod, i;
732: struct xennet_rxreq *req;
733: paddr_t ma, pa;
734: vaddr_t va;
735: mmu_update_t mmu[1];
736: multicall_entry_t mcl[2];
737: struct mbuf *m;
738: void *pktp;
739: int more_to_do;
740:
741: if (sc->sc_backend_status != BEST_CONNECTED)
742: return 1;
743:
744: xennet_tx_complete(sc);
745:
746: again:
747: DPRINTFN(XEDB_EVENT, ("xennet_handler prod %d cons %d\n",
748: sc->sc_rx_ring.sring->rsp_prod, sc->sc_rx_ring.rsp_cons));
749:
750: resp_prod = sc->sc_rx_ring.sring->rsp_prod;
751: x86_lfence(); /* ensure we see replies up to resp_prod */
752: for (i = sc->sc_rx_ring.rsp_cons; i != resp_prod; i++) {
753: netif_rx_response_t *rx = RING_GET_RESPONSE(&sc->sc_rx_ring, i);
754: req = &sc->sc_rxreqs[rx->id];
755: KASSERT(req->rxreq_gntref != GRANT_INVALID_REF);
1.10.4.3 yamt 756: KASSERT(req->rxreq_id == rx->id);
1.10.4.2 yamt 757: ma = xengnt_revoke_transfer(req->rxreq_gntref);
758: if (ma == 0) {
759: DPRINTFN(XEDB_EVENT, ("xennet_handler ma == 0\n"));
760: /*
761: * the remote could't send us a packet.
762: * we can't free this rxreq as no page will be mapped
763: * here. Instead give it back immediatly to backend.
764: */
765: ifp->if_ierrors++;
766: RING_GET_REQUEST(&sc->sc_rx_ring,
767: sc->sc_rx_ring.req_prod_pvt)->id = req->rxreq_id;
768: RING_GET_REQUEST(&sc->sc_rx_ring,
769: sc->sc_rx_ring.req_prod_pvt)->gref =
770: req->rxreq_gntref;
771: sc->sc_rx_ring.req_prod_pvt++;
772: RING_PUSH_REQUESTS(&sc->sc_rx_ring);
773: continue;
774: }
775: req->rxreq_gntref = GRANT_INVALID_REF;
776:
777: pa = req->rxreq_pa;
778: va = req->rxreq_va;
779: /* remap the page */
780: mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
781: mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
782: MULTI_update_va_mapping(&mcl[0], va,
783: (ma << PAGE_SHIFT) | PG_V | PG_KW, UVMF_TLB_FLUSH|UVMF_ALL);
784: xpmap_phys_to_machine_mapping[
785: (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
786: mcl[1].op = __HYPERVISOR_mmu_update;
787: mcl[1].args[0] = (unsigned long)mmu;
788: mcl[1].args[1] = 1;
789: mcl[1].args[2] = 0;
790: mcl[1].args[3] = DOMID_SELF;
791: HYPERVISOR_multicall(mcl, 2);
792: pktp = (void *)(va + rx->offset);
793: #ifdef XENNET_DEBUG_DUMP
794: xennet_hex_dump(pktp, rx->status, "r", rx->id);
795: #endif
796: if ((ifp->if_flags & IFF_PROMISC) == 0) {
797: struct ether_header *eh = pktp;
798: if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
1.10.4.5 yamt 799: memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
1.10.4.2 yamt 800: ETHER_ADDR_LEN) != 0) {
801: DPRINTFN(XEDB_EVENT,
802: ("xennet_handler bad dest\n"));
803: /* packet not for us */
804: xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE,
805: req);
806: continue;
807: }
808: }
809: MGETHDR(m, M_DONTWAIT, MT_DATA);
810: if (__predict_false(m == NULL)) {
811: printf("xennet: rx no mbuf\n");
812: ifp->if_ierrors++;
813: xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
814: continue;
815: }
1.10.4.3 yamt 816: MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
1.10.4.2 yamt 817:
818: m->m_pkthdr.rcvif = ifp;
819: if (__predict_true(sc->sc_rx_ring.req_prod_pvt !=
820: sc->sc_rx_ring.sring->rsp_prod)) {
821: m->m_len = m->m_pkthdr.len = rx->status;
822: MEXTADD(m, pktp, rx->status,
823: M_DEVBUF, xennet_rx_mbuf_free, req);
824: m->m_flags |= M_EXT_RW; /* we own the buffer */
825: req->rxreq_gntref = GRANT_STACK_REF;
826: } else {
827: /*
828: * This was our last receive buffer, allocate
829: * memory, copy data and push the receive
830: * buffer back to the hypervisor.
831: */
1.10.4.3 yamt 832: m->m_len = min(MHLEN, rx->status);
1.10.4.2 yamt 833: m->m_pkthdr.len = 0;
834: m_copyback(m, 0, rx->status, pktp);
835: xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
836: if (m->m_pkthdr.len < rx->status) {
837: /* out of memory, just drop packets */
838: ifp->if_ierrors++;
839: m_freem(m);
840: continue;
841: }
842: }
1.10.4.3 yamt 843: if ((rx->flags & NETRXF_csum_blank) != 0) {
844: xennet_checksum_fill(&m);
845: if (m == NULL) {
846: ifp->if_ierrors++;
847: continue;
848: }
849: }
1.10.4.2 yamt 850: #if NBPFILTER > 0
851: /*
852: * Pass packet to bpf if there is a listener.
853: */
854: if (ifp->if_bpf)
855: bpf_mtap(ifp->if_bpf, m);
856: #endif
857:
858: ifp->if_ipackets++;
859:
860: /* Pass the packet up. */
861: (*ifp->if_input)(ifp, m);
862: }
863: x86_lfence();
864: sc->sc_rx_ring.rsp_cons = i;
865: RING_FINAL_CHECK_FOR_RESPONSES(&sc->sc_rx_ring, more_to_do);
866: if (more_to_do)
867: goto again;
868: return 1;
869: }
870:
871: /*
872: * Called at splnet.
873: */
874: void
875: xennet_start(struct ifnet *ifp)
876: {
877: struct xennet_xenbus_softc *sc = ifp->if_softc;
878:
879: DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start()\n", sc->sc_dev.dv_xname));
880:
881: #if NRND > 0
882: rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);
883: #endif
884:
885: xennet_tx_complete(sc);
886:
887: if (__predict_false(
888: (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING))
889: return;
890:
891: /*
892: * The Xen communication channel is much more efficient if we can
893: * schedule batch of packets for domain0. To achieve this, we
894: * schedule a soft interrupt, and just return. This way, the network
895: * stack will enqueue all pending mbufs in the interface's send queue
896: * before it is processed by xennet_softstart().
897: */
1.10.4.7 yamt 898: softint_schedule(sc->sc_softintr);
1.10.4.2 yamt 899: return;
900: }
901:
902: /*
903: * called at splsoftnet
904: */
/*
 * Soft-interrupt half of the transmit path, scheduled from
 * xennet_start().  Drains the interface send queue into the Xen TX
 * ring: each packet occupies one ring slot describing one contiguous,
 * page-aligned buffer, so packets that are fragmented, lack a known
 * physical address, or straddle a page boundary are first copied into
 * a fresh mbuf/cluster.  The backend is notified at most once, after
 * the whole batch has been pushed.
 */
void
xennet_softstart(void *arg)
{
	struct xennet_xenbus_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m, *new_m;
	netif_tx_request_t *txreq;
	RING_IDX req_prod;
	paddr_t pa, pa2;
	struct xennet_txreq *req;
	int notify;
	int do_notify = 0;	/* latched: notify backend once after loop */
	int s;

	s = splnet();
	/* Bail out if the interface went down or is already busy. */
	if (__predict_false(
	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)) {
		splx(s);
		return;
	}

	req_prod = sc->sc_tx_ring.req_prod_pvt;
	while (/*CONSTCOND*/1) {
		uint16_t txflags;

		/* Need a free TX request slot before touching the queue. */
		req = SLIST_FIRST(&sc->sc_txreq_head);
		if (__predict_false(req == NULL)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		/* Peek only; dequeue happens once we know we can send. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Get the physical address of the first segment, using
		 * the address cached by the mbuf layer when available
		 * instead of a pmap lookup.
		 */
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			pa = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			break;
		case 0:
			KASSERT(m->m_paddr != M_PADDR_INVALID);
			pa = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			break;
		default:
			/* pa == 0 forces the copy path below. */
			if ((m->m_flags & M_EXT_LAZY) != 0) {
				pa = 0;
			} else if(__predict_false(
			    !pmap_extract(pmap_kernel(), (vaddr_t)m->m_data,
			    &pa))) {
				panic("xennet_start: no pa");
			}
			break;
		}

		/* Ask the backend to fill in a delegated TCP/UDP checksum. */
		if ((m->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
			txflags = NETTXF_csum_blank;
		} else {
			txflags = 0;
		}

		/*
		 * Copy path: the packet is fragmented, has no usable pa,
		 * or crosses a page boundary, so linearize it into a
		 * fresh single mbuf (cluster if it doesn't fit in MHLEN).
		 */
		if (m->m_pkthdr.len != m->m_len || pa == 0 ||
		    (pa ^ (pa + m->m_pkthdr.len - 1)) & PG_FRAME) {

			MGETHDR(new_m, M_DONTWAIT, MT_DATA);
			if (__predict_false(new_m == NULL)) {
				printf("xennet: no mbuf\n");
				break;
			}
			if (m->m_pkthdr.len > MHLEN) {
				MCLGET(new_m, M_DONTWAIT);
				if (__predict_false(
				    (new_m->m_flags & M_EXT) == 0)) {
					DPRINTF(("xennet: no mbuf cluster\n"));
					m_freem(new_m);
					break;
				}
			}

			m_copydata(m, 0, m->m_pkthdr.len, mtod(new_m, void *));
			new_m->m_len = new_m->m_pkthdr.len = m->m_pkthdr.len;

			/* Recompute pa for the freshly allocated buffer. */
			if ((new_m->m_flags & M_EXT) != 0) {
				pa = new_m->m_ext.ext_paddr;
				KASSERT(new_m->m_data == new_m->m_ext.ext_buf);
				KASSERT(pa != M_PADDR_INVALID);
			} else {
				pa = new_m->m_paddr;
				KASSERT(pa != M_PADDR_INVALID);
				KASSERT(new_m->m_data == M_BUFADDR(new_m));
				pa += M_BUFOFFSET(new_m);
			}
			/* Grant the backend read-only access to the page. */
			if (__predict_false(xengnt_grant_access(
			    sc->sc_xbusd->xbusd_otherend_id,
			    xpmap_ptom_masked(pa),
			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
				m_freem(new_m);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
			/* we will be able to send new_m */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			m = new_m;
		} else {
			/* Zero-copy path: grant access to the mbuf's page. */
			if (__predict_false(xengnt_grant_access(
			    sc->sc_xbusd->xbusd_otherend_id,
			    xpmap_ptom_masked(pa),
			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
			/* we will be able to send m */
			IFQ_DEQUEUE(&ifp->if_snd, m);
		}
		MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);

		KASSERT(((pa ^ (pa + m->m_pkthdr.len - 1)) & PG_FRAME) == 0);

		/*
		 * Commit the slot: record the mbuf in the request so it
		 * stays alive while the backend reads it.
		 */
		SLIST_REMOVE_HEAD(&sc->sc_txreq_head, txreq_next);
		req->txreq_m = m;

		DPRINTFN(XEDB_MBUF, ("xennet_start id %d, "
		    "mbuf %p, buf %p/%p/%p, size %d\n",
		    req->txreq_id, m, mtod(m, void *), (void *)pa,
		    (void *)xpmap_ptom_masked(pa), m->m_pkthdr.len));
		pmap_extract_ma(pmap_kernel(), mtod(m, vaddr_t), &pa2);
		DPRINTFN(XEDB_MBUF, ("xennet_start pa %p ma %p/%p\n",
		    (void *)pa, (void *)xpmap_ptom_masked(pa), (void *)pa2));
#ifdef XENNET_DEBUG_DUMP
		xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "s", req->txreq_id);
#endif

		/* Fill in the ring request for this packet. */
		txreq = RING_GET_REQUEST(&sc->sc_tx_ring, req_prod);
		txreq->id = req->txreq_id;
		txreq->gref = req->txreq_gntref;
		txreq->offset = pa & ~PG_FRAME;
		txreq->size = m->m_pkthdr.len;
		txreq->flags = txflags;

		req_prod++;
		sc->sc_tx_ring.req_prod_pvt = req_prod;
		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_tx_ring, notify);
		if (notify)
			do_notify = 1;

#ifdef XENNET_DEBUG
		DPRINTFN(XEDB_MEM, ("packet addr %p/%p, physical %p/%p, "
		    "m_paddr %p, len %d/%d\n", M_BUFADDR(m), mtod(m, void *),
		    (void *)*kvtopte(mtod(m, vaddr_t)),
		    (void *)xpmap_mtop(*kvtopte(mtod(m, vaddr_t))),
		    (void *)m->m_paddr, m->m_pkthdr.len, m->m_len));
		DPRINTFN(XEDB_MEM, ("id %d gref %d offset %d size %d flags %d"
		    " prod %d\n",
		    txreq->id, txreq->gref, txreq->offset, txreq->size,
		    txreq->flags, req_prod));
#endif

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
		}
#endif
	}

	x86_lfence();
	/* One event-channel notification covers the whole batch above. */
	if (do_notify) {
		hypervisor_notify_via_evtchn(sc->sc_evtchn);
		ifp->if_timer = 5;
	}
	splx(s);

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start() done\n",
	    sc->sc_dev.dv_xname));
}
1085:
1086: int
1.10.4.5 yamt 1087: xennet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1.10.4.2 yamt 1088: {
1089: #ifdef XENNET_DEBUG
1090: struct xennet_xenbus_softc *sc = ifp->if_softc;
1091: #endif
1092: int s, error = 0;
1093:
1094: s = splnet();
1095:
1096: DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl()\n", sc->sc_dev.dv_xname));
1097: error = ether_ioctl(ifp, cmd, data);
1098: if (error == ENETRESET)
1099: error = 0;
1100: splx(s);
1101:
1102: DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() returning %d\n",
1103: sc->sc_dev.dv_xname, error));
1104:
1105: return error;
1106: }
1107:
1108: void
1109: xennet_watchdog(struct ifnet *ifp)
1110: {
1111: struct xennet_xenbus_softc *sc = ifp->if_softc;
1112:
1113: printf("%s: xennet_watchdog\n", sc->sc_dev.dv_xname);
1114: }
1115:
1116: int
1117: xennet_init(struct ifnet *ifp)
1118: {
1119: struct xennet_xenbus_softc *sc = ifp->if_softc;
1120: int s = splnet();
1121:
1122: DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n", sc->sc_dev.dv_xname));
1123:
1124: if ((ifp->if_flags & IFF_RUNNING) == 0) {
1125: sc->sc_rx_ring.sring->rsp_event =
1126: sc->sc_rx_ring.rsp_cons + 1;
1127: hypervisor_enable_event(sc->sc_evtchn);
1128: hypervisor_notify_via_evtchn(sc->sc_evtchn);
1129: xennet_reset(sc);
1130: }
1131: ifp->if_flags |= IFF_RUNNING;
1132: ifp->if_flags &= ~IFF_OACTIVE;
1133: ifp->if_timer = 0;
1134: splx(s);
1135: return 0;
1136: }
1137:
1138: void
1139: xennet_stop(struct ifnet *ifp, int disable)
1140: {
1141: struct xennet_xenbus_softc *sc = ifp->if_softc;
1142: int s = splnet();
1143:
1144: ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1145: hypervisor_mask_event(sc->sc_evtchn);
1146: xennet_reset(sc);
1147: splx(s);
1148: }
1149:
/*
 * Reset hook, called from xennet_init() and xennet_stop().
 * Currently only emits a debug trace.
 */
void
xennet_reset(struct xennet_xenbus_softc *sc)
{

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_reset()\n", sc->sc_dev.dv_xname));
}
1156:
1157: #if defined(NFS_BOOT_BOOTSTATIC)
1158: int
1159: xennet_bootstatic_callback(struct nfs_diskless *nd)
1160: {
1161: #if 0
1162: struct ifnet *ifp = nd->nd_ifp;
1163: struct xennet_xenbus_softc *sc =
1164: (struct xennet_xenbus_softc *)ifp->if_softc;
1165: #endif
1166: union xen_cmdline_parseinfo xcp;
1167: struct sockaddr_in *sin;
1168:
1169: memset(&xcp, 0, sizeof(xcp.xcp_netinfo));
1170: xcp.xcp_netinfo.xi_ifno = /* XXX sc->sc_ifno */ 0;
1171: xcp.xcp_netinfo.xi_root = nd->nd_root.ndm_host;
1172: xen_parse_cmdline(XEN_PARSE_NETINFO, &xcp);
1173:
1174: nd->nd_myip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[0]);
1175: nd->nd_gwip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[2]);
1176: nd->nd_mask.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[3]);
1177:
1178: sin = (struct sockaddr_in *) &nd->nd_root.ndm_saddr;
1.10.4.5 yamt 1179: memset((void *)sin, 0, sizeof(*sin));
1.10.4.2 yamt 1180: sin->sin_len = sizeof(*sin);
1181: sin->sin_family = AF_INET;
1182: sin->sin_addr.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[1]);
1.10.4.5 yamt 1183: if (nd->nd_myip.s_addr == 0)
1184: return NFS_BOOTSTATIC_NOSTATIC;
1185: else
1186: return (NFS_BOOTSTATIC_HAS_MYIP|NFS_BOOTSTATIC_HAS_GWIP|
1187: NFS_BOOTSTATIC_HAS_MASK|NFS_BOOTSTATIC_HAS_SERVADDR|
1188: NFS_BOOTSTATIC_HAS_SERVER);
1.10.4.2 yamt 1189: }
1190: #endif /* defined(NFS_BOOT_BOOTSTATIC) */
1191:
1192: #ifdef XENNET_DEBUG_DUMP
1193: #define XCHR(x) hexdigits[(x) & 0xf]
1194: static void
1195: xennet_hex_dump(const unsigned char *pkt, size_t len, const char *type, int id)
1196: {
1197: size_t i, j;
1198:
1199: printf("pkt %p len %d/%x type %s id %d\n", pkt, len, len, type, id);
1200: printf("00000000 ");
1201: for(i=0; i<len; i++) {
1202: printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
1203: if ((i+1) % 16 == 8)
1204: printf(" ");
1205: if ((i+1) % 16 == 0) {
1206: printf(" %c", '|');
1207: for(j=0; j<16; j++)
1208: printf("%c", pkt[i-15+j]>=32 &&
1209: pkt[i-15+j]<127?pkt[i-15+j]:'.');
1210: printf("%c\n%c%c%c%c%c%c%c%c ", '|',
1211: XCHR((i+1)>>28), XCHR((i+1)>>24),
1212: XCHR((i+1)>>20), XCHR((i+1)>>16),
1213: XCHR((i+1)>>12), XCHR((i+1)>>8),
1214: XCHR((i+1)>>4), XCHR(i+1));
1215: }
1216: }
1217: printf("\n");
1218: }
1219: #undef XCHR
1220: #endif
CVSweb <webmaster@jp.NetBSD.org>