--- version 1.3, 2006/05/27 13:54:35
+++ version 1.3.8.6, 2007/12/07 17:27:25

 #include <sys/ioctl.h>
 #include <sys/errno.h>
 #include <sys/device.h>
+#include <sys/intr.h>

 #include <net/if.h>
 #include <net/if_types.h>

 #include <net/if_ether.h>

-#include <machine/xen.h>
-#include <machine/xen_shm.h>
-#include <machine/evtchn.h>
-#include <machine/xenbus.h>
+#include <xen/xen.h>
+#include <xen/xen_shm.h>
+#include <xen/evtchn.h>
+#include <xen/xenbus.h>
+#include <xen/xennet_checksum.h>

 #include <uvm/uvm.h>

 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

+/* linux wants at least 16 bytes free in front of the packet */
+#define LINUX_REQUESTED_OFFSET 16

 /* hash list for TX requests */
 /* descriptor of a packet being handled by the kernel */
 struct xni_pkt {
@@ -124 +129 @@ struct xnetback_instance {
 #define xni_bpf xni_if.if_bpf

 void xvifattach(int);
-static int xennetback_ifioctl(struct ifnet *, u_long, caddr_t);
+static int xennetback_ifioctl(struct ifnet *, u_long, void *);
 static void xennetback_ifstart(struct ifnet *);
 static void xennetback_ifsoftstart(void *);
 static void xennetback_ifwatchdog(struct ifnet *);
@@ -137 +142 @@ static void xennetback_frontend_changed(

 static inline void xennetback_tx_response(struct xnetback_instance *,
     int, int);
-static void xennetback_tx_free(struct mbuf * , caddr_t, size_t, void *);
+static void xennetback_tx_free(struct mbuf * , void *, size_t, void *);

 SLIST_HEAD(, xnetback_instance) xnetback_instances;

@@ -166 +171 @@ static int xennetback_get_mcl_page(padd
 static void xennetback_get_new_mcl_pages(void);
 /*
  * If we can't transfer the mbuf directly, we have to copy it to a page which
- * will be transferred to the remote domain. We use a pool + pool_cache
+ * will be transferred to the remote domain. We use a pool_cache
  * for this, or the mbuf cluster pool cache if MCLBYTES == PAGE_SIZE
  */
 #if MCLBYTES != PAGE_SIZE
-struct pool xmit_pages_pool;
-struct pool_cache xmit_pages_pool_cache;
+pool_cache_t xmit_pages_cache;
 #endif
-struct pool_cache *xmit_pages_pool_cachep;
+pool_cache_t xmit_pages_cachep;

 /* arrays used in xennetback_ifstart(), too large to allocate on stack */
-static mmu_update_t xstart_mmu[NB_XMIT_PAGES_BATCH + 1];
-static multicall_entry_t xstart_mcl[NB_XMIT_PAGES_BATCH];
+static mmu_update_t xstart_mmu[NB_XMIT_PAGES_BATCH];
+static multicall_entry_t xstart_mcl[NB_XMIT_PAGES_BATCH + 1];
 static gnttab_transfer_t xstart_gop[NB_XMIT_PAGES_BATCH];
 struct mbuf *mbufs_sent[NB_XMIT_PAGES_BATCH];
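Note: the two sizes swap deliberately. xstart_mcl gains a spare slot because one more multicall than transferred pages is issued per batch (the result-checking loop further down runs to i + 1, presumably because a trailing multicall flushes the batched mmu_update entries), while xstart_mmu only ever needs one entry per transferred page and loses its spare slot.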
 struct _pages_pool_free {
@@ -189 +193 @@ struct _pages_pool_free {
 static inline void
 xni_pkt_unmap(struct xni_pkt *pkt, vaddr_t pkt_va)
 {
-    xen_shm_unmap(pkt_va, 1, pkt->pkt_handle);
+    xen_shm_unmap(pkt_va, 1, &pkt->pkt_handle);
     pool_put(&xni_pkt_pool, pkt);
 }
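Note: the xen_shm API now passes grant references and map handles by pointer, i.e. as arrays, so a single-grant caller like this driver hands in the address of its one ref and handle. A minimal sketch of the calling convention, reconstructed from the calls visible in this diff (txreq and xneti stand for the surrounding driver state):

    grant_ref_t gref = txreq->gref;    /* one-entry "array" */
    grant_handle_t handle;
    vaddr_t pkt_va;
    int error;

    /* map one read-only grant from the frontend's domain */
    error = xen_shm_map(1, xneti->xni_domid, &gref, &pkt_va, &handle,
        XSHM_RO);
    if (error == 0) {
        /* inspect the frontend's page at pkt_va, then unmap */
        xen_shm_unmap(pkt_va, 1, &handle);
    }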
@@ -218 +222 @@ xvifattach(int n)

     /* initialise pools */
     pool_init(&xni_pkt_pool, sizeof(struct xni_pkt), 0, 0, 0,
-        "xnbpkt", NULL);
+        "xnbpkt", NULL, IPL_VM);
 #if MCLBYTES != PAGE_SIZE
-    pool_init(&xmit_pages_pool, PAGE_SIZE, 0, 0, 0, "xnbxm", NULL);
-    pool_cache_init(&xmit_pages_pool_cache, &xmit_pages_pool,
-        NULL, NULL, NULL);
-    xmit_pages_pool_cachep = &xmit_pages_pool_cache;
+    xmit_pages_cache = pool_cache_init(PAGE_SIZE, 0, 0, 0, "xnbxm", NULL,
+        IPL_VM, NULL, NULL, NULL);
+    xmit_pages_cachep = xmit_pages_cache;
 #else
-    xmit_pages_pool_cachep = &mclpool_cache;
+    xmit_pages_cachep = mcl_cache;
 #endif

     SLIST_INIT(&xnetback_instances);
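Note: this hunk tracks the NetBSD pool_cache(9) rewrite. A cache is no longer a struct pool_cache the caller layers over a separately initialised struct pool; pool_cache_init() now allocates the pool internally and returns an opaque pool_cache_t, and both pool_init() and pool_cache_init() take the IPL at which the pool may be used. Side by side, using this file's own names:

    /* old API: caller supplies both objects and wires them together */
    pool_init(&xmit_pages_pool, PAGE_SIZE, 0, 0, 0, "xnbxm", NULL);
    pool_cache_init(&xmit_pages_pool_cache, &xmit_pages_pool,
        NULL, NULL, NULL);

    /* new API: one call returns an opaque handle; IPL_VM declares the
     * highest interrupt level the cache is used from, and the three
     * trailing NULLs are the optional ctor/dtor/arg */
    xmit_pages_cache = pool_cache_init(PAGE_SIZE, 0, 0, 0, "xnbxm",
        NULL, IPL_VM, NULL, NULL, NULL);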
@@ -243 +246 @@ xennetback_xenbus_create(struct xenbus_d
     int i, err;

     if ((err = xenbus_read_ul(NULL, xbusd->xbusd_path,
-        "frontend-id", &domid)) != 0) {
+        "frontend-id", &domid, 10)) != 0) {
         aprint_error("xvif: can't read %s/frontend-id: %d\n",
             xbusd->xbusd_path, err);
         return err;
     }
     if ((err = xenbus_read_ul(NULL, xbusd->xbusd_path,
-        "handle", &handle)) != 0) {
+        "handle", &handle, 10)) != 0) {
         aprint_error("xvif: can't read %s/handle: %d\n",
             xbusd->xbusd_path, err);
         return err;
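Note: xenbus_read_ul() grew a trailing numeric-base argument, strtoul()-style; every caller in this diff passes 10 because these xenstore nodes hold decimal numbers. A minimal sketch of the updated call:

    unsigned long domid;
    int err;

    /* parse <xbusd_path>/frontend-id as a base-10 unsigned long */
    err = xenbus_read_ul(NULL, xbusd->xbusd_path, "frontend-id",
        &domid, 10);
    if (err != 0)
        return err;    /* node missing or not a number */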
@@ -271 +274 @@ xennetback_xenbus_create(struct xenbus_d
     xbusd->xbusd_u.b.b_detach = xennetback_xenbus_destroy;
     xneti->xni_xbusd = xbusd;

-    xneti->xni_softintr = softintr_establish(IPL_SOFTNET,
+    xneti->xni_softintr = softint_establish(SOFTINT_NET,
         xennetback_ifsoftstart, xneti);
     if (xneti->xni_softintr == NULL) {
         err = ENOMEM;
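Note: this is the softintr(9) to softint(9) API transition: softint_establish() takes a SOFTINT_* class instead of an IPL_SOFT* level, and the companion renames softint_schedule() and softint_disestablish() appear later in this diff. A minimal sketch of the life cycle, using this driver's handler:

    void *sih;

    /* register a network-class soft interrupt */
    sih = softint_establish(SOFTINT_NET, xennetback_ifsoftstart, xneti);
    if (sih == NULL)
        return ENOMEM;          /* as the driver does above */

    softint_schedule(sih);      /* request a run from interrupt context */
    softint_disestablish(sih);  /* tear down on detach */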
@@ -309 +312 @@ xennetback_xenbus_create(struct xenbus_d
     ifp->if_flags =
         IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
     ifp->if_snd.ifq_maxlen =
-        max(ifqmaxlen, /*NETIF_RX_RING_SIZE XXX*/0 * 2);
+        max(ifqmaxlen, NET_TX_RING_SIZE * 2);
+    ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
     ifp->if_ioctl = xennetback_ifioctl;
     ifp->if_start = xennetback_ifstart;
     ifp->if_watchdog = xennetback_ifwatchdog;
@@ -330 +334 @@ xennetback_xenbus_create(struct xenbus_d
         xbusd->xbusd_path, err);
         goto fail;
     }
-    err = xenbus_write(NULL, xbusd->xbusd_path, "hotplug-status",
-        "connected"); /* XXX userland daemon */
     if (err) {
         printf("failed to write %s/hotplug-status: %d\n",
             xbusd->xbusd_path, err);
@@ -358 +360 @@ xennetback_xenbus_destroy(void *arg)
     printf("%s: disconnecting\n", xneti->xni_if.if_xname);
     hypervisor_mask_event(xneti->xni_evtchn);
     event_remove_handler(xneti->xni_evtchn, xennetback_evthandler, xneti);
-    softintr_disestablish(xneti->xni_softintr);
+    softint_disestablish(xneti->xni_softintr);

     SLIST_REMOVE(&xnetback_instances,
         xneti, xnetback_instance, next);
@@ -416 +418 @@ xennetback_frontend_changed(void *arg, X
     case XenbusStateConnected:
         /* read communication information */
         err = xenbus_read_ul(NULL, xbusd->xbusd_otherend,
-            "tx-ring-ref", &tx_ring_ref);
+            "tx-ring-ref", &tx_ring_ref, 10);
         if (err) {
             xenbus_dev_fatal(xbusd, err, "reading %s/tx-ring-ref",
                 xbusd->xbusd_otherend);
             break;
         }
         err = xenbus_read_ul(NULL, xbusd->xbusd_otherend,
-            "rx-ring-ref", &rx_ring_ref);
+            "rx-ring-ref", &rx_ring_ref, 10);
         if (err) {
             xenbus_dev_fatal(xbusd, err, "reading %s/rx-ring-ref",
                 xbusd->xbusd_otherend);
             break;
         }
         err = xenbus_read_ul(NULL, xbusd->xbusd_otherend,
-            "event-channel", &revtchn);
+            "event-channel", &revtchn, 10);
         if (err) {
             xenbus_dev_fatal(xbusd, err, "reading %s/event-channel",
                 xbusd->xbusd_otherend);
@@ -439 +441 @@ xennetback_frontend_changed(void *arg, X
         /* allocate VA space and map rings */
         xneti->xni_tx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
             UVM_KMF_VAONLY);
-        if (xneti->xni_tx_ring_va == 0)
+        if (xneti->xni_tx_ring_va == 0) {
+            xenbus_dev_fatal(xbusd, ENOMEM,
+                "can't get VA for tx ring", xbusd->xbusd_otherend);
             break;
+        }
         tx_ring = (void *)xneti->xni_tx_ring_va;
         xneti->xni_rx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
             UVM_KMF_VAONLY);
-        if (xneti->xni_rx_ring_va == 0)
+        if (xneti->xni_rx_ring_va == 0) {
+            xenbus_dev_fatal(xbusd, ENOMEM,
+                "can't get VA for rx ring", xbusd->xbusd_otherend);
             goto err1;
+        }
         rx_ring = (void *)xneti->xni_rx_ring_va;
         op.host_addr = xneti->xni_tx_ring_va;
         op.flags = GNTMAP_host_map;
@@ -622 +630 @@ xennetback_evthandler(void *arg)
         x86_lfence();
         XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname,
             txreq->size));
+        req_cons++;
         if (__predict_false((ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
             (IFF_UP | IFF_RUNNING))) {
             /* interface not up, drop */
@@ -629 +638 @@ xennetback_evthandler(void *arg)
                 NETIF_RSP_DROPPED);
             continue;
         }
-        req_cons++;
         /*
          * Do some sanity checks, and map the packet's page.
          */
@@ -681 +689 @@ xennetback_evthandler(void *arg)
             m_freem(m);
             continue;
         }
-        err = xen_shm_map(1, xneti->xni_domid, txreq->gref, &pkt_va,
+        err = xen_shm_map(1, xneti->xni_domid, &txreq->gref, &pkt_va,
             &pkt->pkt_handle, XSHM_RO);
         if (__predict_false(err == ENOMEM)) {
             xennetback_tx_response(xneti, txreq->id,
@@ -707 +715 @@ xennetback_evthandler(void *arg)
             struct ether_header *eh =
                 (void*)(pkt_va + txreq->offset);
             if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
-                memcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
+                memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
                 ETHER_ADDR_LEN) != 0) {
                 xni_pkt_unmap(pkt, pkt_va);
                 m_freem(m);
@@ -720 +728 @@ xennetback_evthandler(void *arg)
 a lot of work is needed in the tcp stack to handle read-only ext storage
 so always copy for now.
         if (((req_cons + 1) & (NET_TX_RING_SIZE - 1)) ==
-            (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1))) {
+            (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1)))
+#else
+        if (1)
 #endif /* notyet */
-        if (1) {
+        {
             /*
              * This is the last TX buffer. Copy the data and
              * ack it. Delaying it until the mbuf is
@@ -731 +741 @@ so always copy for now.
             m->m_len = min(MHLEN, txreq->size);
             m->m_pkthdr.len = 0;
             m_copyback(m, 0, txreq->size,
-                (caddr_t)(pkt_va + txreq->offset));
+                (void *)(pkt_va + txreq->offset));
             xni_pkt_unmap(pkt, pkt_va);
             if (m->m_pkthdr.len < txreq->size) {
                 ifp->if_ierrors++;
@@ -752 +762 @@ so always copy for now.
             m->m_pkthdr.len = m->m_len = txreq->size;
             m->m_flags |= M_EXT_ROMAP;
         }
+        if ((txreq->flags & NETTXF_csum_blank) != 0) {
+            xennet_checksum_fill(&m);
+            if (m == NULL) {
+                ifp->if_ierrors++;
+                continue;
+            }
+        }
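Note: this block, together with the new <xen/xennet_checksum.h> include, is the receive half of checksum offload across the domain boundary: a frontend may hand over a packet with NETTXF_csum_blank set, meaning the TCP/UDP checksum was never computed, and the backend must complete it before injecting the packet into the local stack. xennet_checksum_fill() may replace the mbuf chain or free it outright, which is why m is re-tested and the packet dropped (counting an input error) on failure. The transmit half, setting NETRXF_csum_blank on outgoing responses, appears in xennetback_ifsoftstart() below.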
         m->m_pkthdr.rcvif = ifp;
         ifp->if_ipackets++;

@@ -765 +782 @@ so always copy for now.
     xneti->xni_txring.req_cons = req_cons;
     x86_sfence();
     /* check to see if we can transmit more packets */
-    softintr_schedule(xneti->xni_softintr);
+    softint_schedule(xneti->xni_softintr);

     return 1;
 }

 static void
-xennetback_tx_free(struct mbuf *m, caddr_t va, size_t size, void * arg)
+xennetback_tx_free(struct mbuf *m, void *va, size_t size, void *arg)
 {
     int s = splnet();
     struct xni_pkt *pkt = arg;
@@ -784 +801 @@ xennetback_tx_free(struct mbuf *m, caddr
     xni_pkt_unmap(pkt, (vaddr_t)va & ~PAGE_MASK);

     if (m)
-        pool_cache_put(&mbpool_cache, m);
+        pool_cache_put(mb_cache, m);
     splx(s);
 }

 static int
-xennetback_ifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+xennetback_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
 {
     //struct xnetback_instance *xneti = ifp->if_softc;
     //struct ifreq *ifr = (struct ifreq *)data;
@@ -815 +832 @@ xennetback_ifstart(struct ifnet *ifp)
      * stack will enqueue all pending mbufs in the interface's send queue
      * before it is processed by xennet_softstart().
      */
-    softintr_schedule(xneti->xni_softintr);
+    softint_schedule(xneti->xni_softintr);
 }

 static void
@@ -827 +844 @@ xennetback_ifsoftstart(void *arg)
     vaddr_t xmit_va;
     paddr_t xmit_pa;
     paddr_t xmit_ma;
-    paddr_t newp_ma;
+    paddr_t newp_ma = 0; /* XXX gcc */
     int i, j, nppitems;
     mmu_update_t *mmup;
     multicall_entry_t *mclp;
@@ -889 +906 @@ xennetback_ifsoftstart(void *arg)
             } else {
                 /* we have to copy the packet */
                 xmit_va = (vaddr_t)pool_cache_get_paddr(
-                    xmit_pages_pool_cachep,
+                    xmit_pages_cachep,
                     PR_NOWAIT, &xmit_pa);
                 if (__predict_false(xmit_va == 0))
                     break; /* out of memory */
@@ -900 +917 @@ xennetback_ifsoftstart(void *arg)
                     "0x%x ma 0x%x\n", (u_int)xmit_va,
                     (u_int)xmit_ma));
                 m_copydata(m, 0, m->m_pkthdr.len,
-                    (caddr_t)xmit_va);
-                offset = 0;
+                    (char *)xmit_va + LINUX_REQUESTED_OFFSET);
+                offset = LINUX_REQUESTED_OFFSET;
                 pages_pool_free[nppitems].va = xmit_va;
                 pages_pool_free[nppitems].pa = xmit_pa;
                 nppitems++;
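Note: this is where LINUX_REQUESTED_OFFSET (16, defined near the top) is put to use: a Linux frontend wants at least 16 bytes of headroom in front of a received packet, so the copy now starts 16 bytes into the fresh page and that offset is reported in the RX response built just below. Condensed from the hunks involved:

    /* leave headroom for the frontend, then report where data starts */
    m_copydata(m, 0, m->m_pkthdr.len,
        (char *)xmit_va + LINUX_REQUESTED_OFFSET);
    offset = LINUX_REQUESTED_OFFSET;
    rxresp->offset = offset;    /* frontend reads from this offset */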
@@ -911 +928 @@ xennetback_ifsoftstart(void *arg)
                 xneti->xni_rxring.req_cons)->gref;
             id = RING_GET_REQUEST(&xneti->xni_rxring,
                 xneti->xni_rxring.req_cons)->id;
+            x86_lfence();
             xneti->xni_rxring.req_cons++;
             rxresp = RING_GET_RESPONSE(&xneti->xni_rxring,
                 resp_prod);
             rxresp->id = id;
             rxresp->offset = offset;
             rxresp->status = m->m_pkthdr.len;
+            if ((m->m_pkthdr.csum_flags &
+                (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
+                rxresp->flags = NETRXF_csum_blank;
+            } else {
+                rxresp->flags = 0;
+            }
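Note: the transmit-direction half of the new checksum offload. Since the interface now advertises IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx, the local stack may leave M_CSUM_TCPv4/M_CSUM_UDPv4 set in csum_flags instead of computing the checksum itself; the backend forwards that state to the frontend as NETRXF_csum_blank rather than computing the sum here. Setting the flags at response-build time also supersedes the old unconditional rxresp->flags = 0 in the post-transfer loop, removed further down.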
             /*
              * transfers the page containing the packet to the
              * remote domain, and map newp in place.
@@ -936 +960 @@ xennetback_ifsoftstart(void *arg)
             mmup++;

             /* done with this packet */
-            resp_prod++;
             IFQ_DEQUEUE(&ifp->if_snd, m);
             mbufs_sent[i] = m;
+            resp_prod++;
             i++; /* this packet has been queued */
             ifp->if_opackets++;
 #if NBPFILTER > 0
@@ -969 +993 @@ xennetback_ifsoftstart(void *arg)
                     ifp->if_xname);
             }
             for (j = 0; j < i + 1; j++) {
-                if (xstart_mcl[j].result != 0)
-                    printf("%s: xstart_mcl[%d] failed (%lu)\n",
-                        ifp->if_xname, j, xstart_mcl[j].result);
+                if (xstart_mcl[j].result != 0) {
+                    printf("%s: xstart_mcl[%d] "
+                        "failed (%lu)\n", ifp->if_xname,
+                        j, xstart_mcl[j].result);
+                    printf("%s: req_prod %u req_cons "
+                        "%u rsp_prod %u rsp_prod_pvt %u "
+                        "i %u\n",
+                        ifp->if_xname,
+                        xneti->xni_rxring.sring->req_prod,
+                        xneti->xni_rxring.req_cons,
+                        xneti->xni_rxring.sring->rsp_prod,
+                        xneti->xni_rxring.rsp_prod_pvt,
+                        i);
+                }
             }
             if (HYPERVISOR_grant_table_op(GNTTABOP_transfer,
                 xstart_gop, i) != 0) {
@@ -980 +1015 @@ xennetback_ifsoftstart(void *arg)
             }

             for (j = 0; j < i; j++) {
-                if (xstart_gop[j].status == GNTST_bad_page)
-                    panic("%s: gop[%d] failed",
-                        ifp->if_xname, j);
-                rxresp = RING_GET_RESPONSE(&xneti->xni_rxring,
-                    xneti->xni_rxring.rsp_prod_pvt + j);
                 if (xstart_gop[j].status != 0) {
-                    printf("GNTTABOP_transfer[%d] %d\n",
+                    printf("%s GNTTABOP_transfer[%d] %d\n",
+                        ifp->if_xname,
                         j, xstart_gop[j].status);
+                    printf("%s: req_prod %u req_cons "
+                        "%u rsp_prod %u rsp_prod_pvt %u "
+                        "i %d\n",
+                        ifp->if_xname,
+                        xneti->xni_rxring.sring->req_prod,
+                        xneti->xni_rxring.req_cons,
+                        xneti->xni_rxring.sring->rsp_prod,
+                        xneti->xni_rxring.rsp_prod_pvt,
+                        i);
+                    rxresp = RING_GET_RESPONSE(
+                        &xneti->xni_rxring,
+                        xneti->xni_rxring.rsp_prod_pvt + j);
                     rxresp->status = NETIF_RSP_ERROR;
                 }
-                rxresp->flags = 0;
             }

             /* update pointer */
+            KASSERT(
+                xneti->xni_rxring.rsp_prod_pvt + i == resp_prod);
             xneti->xni_rxring.rsp_prod_pvt = resp_prod;
             RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
                 &xneti->xni_rxring, j);
@@ -1004 +1048 @@ xennetback_ifsoftstart(void *arg)
                 m_freem(mbufs_sent[j]);
             }
             for (j = 0; j < nppitems; j++) {
-                pool_cache_put_paddr(xmit_pages_pool_cachep,
+                pool_cache_put_paddr(xmit_pages_cachep,
                     (void *)pages_pool_free[j].va,
                     pages_pool_free[j].pa);
} |
} |
Line 1031 xennetback_ifsoftstart(void *arg) |
|
Line 1075 xennetback_ifsoftstart(void *arg) |
|
break; |
break; |
} |
} |
} |
} |
|
/* |
|
* note that we don't use RING_FINAL_CHECK_FOR_REQUESTS() |
|
* here, as the frontend doesn't notify when adding |
|
* requests anyway |
|
*/ |
if (__predict_false( |
if (__predict_false( |
xneti->xni_rxring.req_cons - resp_prod == |
!RING_HAS_UNCONSUMED_REQUESTS(&xneti->xni_rxring))) { |
NET_RX_RING_SIZE)) { |
|
/* ring full */ |
/* ring full */ |
break; |
break; |
} |
} |