version 1.39.2.2, 2018/07/28 04:37:46 |
version 1.39.2.3, 2019/01/18 08:50:27 |
Line 45 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 45 __KERNEL_RCSID(0, "$NetBSD$"); |
|
#include <sys/sockio.h> |
#include <sys/sockio.h> |
#include <sys/cpu.h> |
#include <sys/cpu.h> |
#include <sys/module.h> |
#include <sys/module.h> |
|
#include <sys/pcq.h> |
|
|
#include <dev/pci/virtioreg.h> |
#include <dev/pci/virtioreg.h> |
#include <dev/pci/virtiovar.h> |
#include <dev/pci/virtiovar.h> |
Line 59 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 60 __KERNEL_RCSID(0, "$NetBSD$"); |
|
|
|
#ifdef NET_MPSAFE |
#ifdef NET_MPSAFE |
#define VIOIF_MPSAFE 1 |
#define VIOIF_MPSAFE 1 |
|
#define VIOIF_MULTIQ 1 |
#endif |
#endif |
|
|
#ifdef SOFTINT_INTR |
#ifdef SOFTINT_INTR |
Line 71 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 73 __KERNEL_RCSID(0, "$NetBSD$"); |
|
/* Configuration registers */
#define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
#define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */
#define VIRTIO_NET_CONFIG_MAX_VQ_PAIRS	8 /* 16bit */

/*
 * Feature bits (virtio-net).  Use __BIT() so the constants are unsigned
 * and safe for bits >= 31; see also the VIRTIO_NET_FLAG_BITS snprintb
 * string which must stay in sync with these values.
 */
#define VIRTIO_NET_F_CSUM		__BIT(0)
#define VIRTIO_NET_F_GUEST_CSUM		__BIT(1)
#define VIRTIO_NET_F_MAC		__BIT(5)
#define VIRTIO_NET_F_GSO		__BIT(6)
#define VIRTIO_NET_F_GUEST_TSO4		__BIT(7)
#define VIRTIO_NET_F_GUEST_TSO6		__BIT(8)
#define VIRTIO_NET_F_GUEST_ECN		__BIT(9)
#define VIRTIO_NET_F_GUEST_UFO		__BIT(10)
#define VIRTIO_NET_F_HOST_TSO4		__BIT(11)
#define VIRTIO_NET_F_HOST_TSO6		__BIT(12)
#define VIRTIO_NET_F_HOST_ECN		__BIT(13)
#define VIRTIO_NET_F_HOST_UFO		__BIT(14)
#define VIRTIO_NET_F_MRG_RXBUF		__BIT(15)
#define VIRTIO_NET_F_STATUS		__BIT(16)
#define VIRTIO_NET_F_CTRL_VQ		__BIT(17)
#define VIRTIO_NET_F_CTRL_RX		__BIT(18)
#define VIRTIO_NET_F_CTRL_VLAN		__BIT(19)
#define VIRTIO_NET_F_CTRL_RX_EXTRA	__BIT(20)
#define VIRTIO_NET_F_GUEST_ANNOUNCE	__BIT(21)
#define VIRTIO_NET_F_MQ			__BIT(22)
|
|
#define VIRTIO_NET_FLAG_BITS \ |
#define VIRTIO_NET_FLAG_BITS \ |
VIRTIO_COMMON_FLAG_BITS \ |
VIRTIO_COMMON_FLAG_BITS \ |
|
"\x17""MQ" \ |
|
"\x16""GUEST_ANNOUNCE" \ |
|
"\x15""CTRL_RX_EXTRA" \ |
"\x14""CTRL_VLAN" \ |
"\x14""CTRL_VLAN" \ |
"\x13""CTRL_RX" \ |
"\x13""CTRL_RX" \ |
"\x12""CTRL_VQ" \ |
"\x12""CTRL_VQ" \ |
Line 152 struct virtio_net_ctrl_cmd { |
|
Line 161 struct virtio_net_ctrl_cmd { |
|
# define VIRTIO_NET_CTRL_VLAN_ADD 0 |
# define VIRTIO_NET_CTRL_VLAN_ADD 0 |
# define VIRTIO_NET_CTRL_VLAN_DEL 1 |
# define VIRTIO_NET_CTRL_VLAN_DEL 1 |
|
|
|
/* Control virtqueue class: multiqueue (VIRTIO_NET_F_MQ) configuration */
#define VIRTIO_NET_CTRL_MQ			4
# define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET	0
# define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN	1
# define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX	0x8000
|
struct virtio_net_ctrl_status { |
struct virtio_net_ctrl_status { |
uint8_t ack; |
uint8_t ack; |
} __packed; |
} __packed; |
Line 171 struct virtio_net_ctrl_vlan { |
|
Line 185 struct virtio_net_ctrl_vlan { |
|
uint16_t id; |
uint16_t id; |
} __packed; |
} __packed; |
|
|
|
struct virtio_net_ctrl_mq { |
|
uint16_t virtqueue_pairs; |
|
} __packed; |
|
|
|
struct vioif_ctrl_cmdspec { |
|
bus_dmamap_t dmamap; |
|
void *buf; |
|
bus_size_t bufsize; |
|
}; |
|
|
/* |
/* |
* if_vioifvar.h: |
* if_vioifvar.h: |
*/ |
*/ |
|
|
|
/*
 * Locking notes:
 * + a field in vioif_txqueue is protected by txq_lock (a spin mutex), and
 *   a field in vioif_rxqueue is protected by rxq_lock (a spin mutex).
 *      - more than one lock cannot be held at once
 * + ctrlq_inuse is protected by ctrlq_wait_lock.
 *      - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
 *      - txq_lock or rxq_lock cannot be held along with ctrlq_wait_lock
 */
|
|
|
struct vioif_txqueue { |
|
kmutex_t *txq_lock; /* lock for tx operations */ |
|
|
|
struct virtqueue *txq_vq; |
|
bool txq_stopping; |
|
bool txq_link_active; |
|
pcq_t *txq_intrq; |
|
|
|
struct virtio_net_hdr *txq_hdrs; |
|
bus_dmamap_t *txq_hdr_dmamaps; |
|
|
|
struct mbuf **txq_mbufs; |
|
bus_dmamap_t *txq_dmamaps; |
|
|
|
void *txq_deferred_transmit; |
|
}; |
|
|
|
struct vioif_rxqueue { |
|
kmutex_t *rxq_lock; /* lock for rx operations */ |
|
|
|
struct virtqueue *rxq_vq; |
|
bool rxq_stopping; |
|
|
|
struct virtio_net_hdr *rxq_hdrs; |
|
bus_dmamap_t *rxq_hdr_dmamaps; |
|
|
|
struct mbuf **rxq_mbufs; |
|
bus_dmamap_t *rxq_dmamaps; |
|
|
|
void *rxq_softint; |
|
}; |
|
|
|
struct vioif_ctrlqueue { |
|
struct virtqueue *ctrlq_vq; |
|
enum { |
|
FREE, INUSE, DONE |
|
} ctrlq_inuse; |
|
kcondvar_t ctrlq_wait; |
|
kmutex_t ctrlq_wait_lock; |
|
struct lwp *ctrlq_owner; |
|
|
|
struct virtio_net_ctrl_cmd *ctrlq_cmd; |
|
struct virtio_net_ctrl_status *ctrlq_status; |
|
struct virtio_net_ctrl_rx *ctrlq_rx; |
|
struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_uc; |
|
struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_mc; |
|
struct virtio_net_ctrl_mq *ctrlq_mq; |
|
|
|
bus_dmamap_t ctrlq_cmd_dmamap; |
|
bus_dmamap_t ctrlq_status_dmamap; |
|
bus_dmamap_t ctrlq_rx_dmamap; |
|
bus_dmamap_t ctrlq_tbl_uc_dmamap; |
|
bus_dmamap_t ctrlq_tbl_mc_dmamap; |
|
bus_dmamap_t ctrlq_mq_dmamap; |
|
}; |
|
|
struct vioif_softc { |
struct vioif_softc { |
device_t sc_dev; |
device_t sc_dev; |
|
|
struct virtio_softc *sc_virtio; |
struct virtio_softc *sc_virtio; |
struct virtqueue sc_vq[3]; |
struct virtqueue *sc_vqs; |
#define VQ_RX 0 |
|
#define VQ_TX 1 |
int sc_max_nvq_pairs; |
#define VQ_CTRL 2 |
int sc_req_nvq_pairs; |
|
int sc_act_nvq_pairs; |
|
|
uint8_t sc_mac[ETHER_ADDR_LEN]; |
uint8_t sc_mac[ETHER_ADDR_LEN]; |
struct ethercom sc_ethercom; |
struct ethercom sc_ethercom; |
short sc_deferred_init_done; |
short sc_deferred_init_done; |
bool sc_link_active; |
bool sc_link_active; |
|
|
/* bus_dmamem */ |
struct vioif_txqueue *sc_txq; |
bus_dma_segment_t sc_hdr_segs[1]; |
struct vioif_rxqueue *sc_rxq; |
struct virtio_net_hdr *sc_hdrs; |
|
#define sc_rx_hdrs sc_hdrs |
|
struct virtio_net_hdr *sc_tx_hdrs; |
|
struct virtio_net_ctrl_cmd *sc_ctrl_cmd; |
|
struct virtio_net_ctrl_status *sc_ctrl_status; |
|
struct virtio_net_ctrl_rx *sc_ctrl_rx; |
|
struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc; |
|
struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc; |
|
|
|
/* kmem */ |
|
bus_dmamap_t *sc_arrays; |
|
#define sc_rxhdr_dmamaps sc_arrays |
|
bus_dmamap_t *sc_txhdr_dmamaps; |
|
bus_dmamap_t *sc_rx_dmamaps; |
|
bus_dmamap_t *sc_tx_dmamaps; |
|
struct mbuf **sc_rx_mbufs; |
|
struct mbuf **sc_tx_mbufs; |
|
|
|
bus_dmamap_t sc_ctrl_cmd_dmamap; |
|
bus_dmamap_t sc_ctrl_status_dmamap; |
|
bus_dmamap_t sc_ctrl_rx_dmamap; |
|
bus_dmamap_t sc_ctrl_tbl_uc_dmamap; |
|
bus_dmamap_t sc_ctrl_tbl_mc_dmamap; |
|
|
|
void *sc_rx_softint; |
bool sc_has_ctrl; |
void *sc_ctl_softint; |
struct vioif_ctrlqueue sc_ctrlq; |
|
|
enum { |
bus_dma_segment_t sc_hdr_segs[1]; |
FREE, INUSE, DONE |
void *sc_dmamem; |
} sc_ctrl_inuse; |
void *sc_kmem; |
kcondvar_t sc_ctrl_wait; |
|
kmutex_t sc_ctrl_wait_lock; |
|
kmutex_t sc_tx_lock; |
|
kmutex_t sc_rx_lock; |
|
bool sc_stopping; |
|
|
|
bool sc_has_ctrl; |
void *sc_ctl_softint; |
}; |
}; |
#define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */
|
|
#define VIOIF_TX_LOCK(_sc) mutex_enter(&(_sc)->sc_tx_lock) |
|
#define VIOIF_TX_UNLOCK(_sc) mutex_exit(&(_sc)->sc_tx_lock) |
|
#define VIOIF_TX_LOCKED(_sc) mutex_owned(&(_sc)->sc_tx_lock) |
|
#define VIOIF_RX_LOCK(_sc) mutex_enter(&(_sc)->sc_rx_lock) |
|
#define VIOIF_RX_UNLOCK(_sc) mutex_exit(&(_sc)->sc_rx_lock) |
|
#define VIOIF_RX_LOCKED(_sc) mutex_owned(&(_sc)->sc_rx_lock) |
|
|
|
/* cfattach interface functions */ |
/* cfattach interface functions */ |
static int vioif_match(device_t, cfdata_t, void *); |
static int vioif_match(device_t, cfdata_t, void *); |
static void vioif_attach(device_t, device_t, void *); |
static void vioif_attach(device_t, device_t, void *); |
Line 248 static void vioif_deferred_init(device_t |
|
Line 304 static void vioif_deferred_init(device_t |
|
static int vioif_init(struct ifnet *); |
static int vioif_init(struct ifnet *); |
static void vioif_stop(struct ifnet *, int); |
static void vioif_stop(struct ifnet *, int); |
static void vioif_start(struct ifnet *); |
static void vioif_start(struct ifnet *); |
|
static void vioif_start_locked(struct ifnet *, struct vioif_txqueue *); |
|
static int vioif_transmit(struct ifnet *, struct mbuf *); |
|
static void vioif_transmit_locked(struct ifnet *, struct vioif_txqueue *); |
static int vioif_ioctl(struct ifnet *, u_long, void *); |
static int vioif_ioctl(struct ifnet *, u_long, void *); |
static void vioif_watchdog(struct ifnet *); |
static void vioif_watchdog(struct ifnet *); |
|
|
/* rx */ |
/* rx */ |
static int vioif_add_rx_mbuf(struct vioif_softc *, int); |
static int vioif_add_rx_mbuf(struct vioif_rxqueue *, int); |
static void vioif_free_rx_mbuf(struct vioif_softc *, int); |
static void vioif_free_rx_mbuf(struct vioif_rxqueue *, int); |
static void vioif_populate_rx_mbufs(struct vioif_softc *); |
static void vioif_populate_rx_mbufs(struct vioif_rxqueue *); |
static void vioif_populate_rx_mbufs_locked(struct vioif_softc *); |
static void vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *); |
static int vioif_rx_deq(struct vioif_softc *); |
static int vioif_rx_deq(struct vioif_rxqueue *); |
static int vioif_rx_deq_locked(struct vioif_softc *); |
static int vioif_rx_deq_locked(struct vioif_rxqueue *); |
static int vioif_rx_vq_done(struct virtqueue *); |
static int vioif_rx_vq_done(struct virtqueue *); |
static void vioif_rx_softint(void *); |
static void vioif_rx_softint(void *); |
static void vioif_rx_drain(struct vioif_softc *); |
static void vioif_rx_drain(struct vioif_rxqueue *); |
|
|
/* tx */ |
/* tx */ |
static int vioif_tx_vq_done(struct virtqueue *); |
static int vioif_tx_vq_done(struct virtqueue *); |
static int vioif_tx_vq_done_locked(struct virtqueue *); |
static int vioif_tx_vq_done_locked(struct virtqueue *); |
static void vioif_tx_drain(struct vioif_softc *); |
static void vioif_tx_drain(struct vioif_txqueue *); |
|
static void vioif_deferred_transmit(void *); |
|
|
/* other control */ |
/* other control */ |
static bool vioif_is_link_up(struct vioif_softc *); |
static bool vioif_is_link_up(struct vioif_softc *); |
Line 278 static int vioif_rx_filter(struct vioif_ |
|
Line 338 static int vioif_rx_filter(struct vioif_ |
|
static int vioif_ctrl_vq_done(struct virtqueue *); |
static int vioif_ctrl_vq_done(struct virtqueue *); |
static int vioif_config_change(struct virtio_softc *); |
static int vioif_config_change(struct virtio_softc *); |
static void vioif_ctl_softint(void *); |
static void vioif_ctl_softint(void *); |
|
static int vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *, int); |
|
static void vioif_enable_interrupt_vqpairs(struct vioif_softc *); |
|
static void vioif_disable_interrupt_vqpairs(struct vioif_softc *); |
|
|
CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc), |
CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc), |
vioif_match, vioif_attach, NULL, NULL); |
vioif_match, vioif_attach, NULL, NULL); |
Line 293 vioif_match(device_t parent, cfdata_t ma |
|
Line 356 vioif_match(device_t parent, cfdata_t ma |
|
return 0; |
return 0; |
} |
} |
|
|
|
static int |
|
vioif_alloc_queues(struct vioif_softc *sc) |
|
{ |
|
int nvq_pairs = sc->sc_max_nvq_pairs; |
|
int nvqs = nvq_pairs * 2; |
|
int i; |
|
|
|
KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX); |
|
|
|
sc->sc_rxq = kmem_zalloc(sizeof(sc->sc_rxq[0]) * nvq_pairs, |
|
KM_NOSLEEP); |
|
if (sc->sc_rxq == NULL) |
|
return -1; |
|
|
|
sc->sc_txq = kmem_zalloc(sizeof(sc->sc_txq[0]) * nvq_pairs, |
|
KM_NOSLEEP); |
|
if (sc->sc_txq == NULL) |
|
return -1; |
|
|
|
if (sc->sc_has_ctrl) |
|
nvqs++; |
|
|
|
sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_NOSLEEP); |
|
if (sc->sc_vqs == NULL) |
|
return -1; |
|
|
|
nvqs = 0; |
|
for (i = 0; i < nvq_pairs; i++) { |
|
sc->sc_rxq[i].rxq_vq = &sc->sc_vqs[nvqs++]; |
|
sc->sc_txq[i].txq_vq = &sc->sc_vqs[nvqs++]; |
|
} |
|
|
|
if (sc->sc_has_ctrl) |
|
sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[nvqs++]; |
|
|
|
return 0; |
|
} |
|
|
|
static void |
|
vioif_free_queues(struct vioif_softc *sc) |
|
{ |
|
int nvq_pairs = sc->sc_max_nvq_pairs; |
|
int nvqs = nvq_pairs * 2; |
|
|
|
if (sc->sc_ctrlq.ctrlq_vq) |
|
nvqs++; |
|
|
|
if (sc->sc_txq) { |
|
kmem_free(sc->sc_txq, sizeof(sc->sc_txq[0]) * nvq_pairs); |
|
sc->sc_txq = NULL; |
|
} |
|
|
|
if (sc->sc_rxq) { |
|
kmem_free(sc->sc_rxq, sizeof(sc->sc_rxq[0]) * nvq_pairs); |
|
sc->sc_rxq = NULL; |
|
} |
|
|
|
if (sc->sc_vqs) { |
|
kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs); |
|
sc->sc_vqs = NULL; |
|
} |
|
} |
|
|
/* allocate memory */
/*
 * dma memory is used for:
 *   rxq_hdrs[slot]:	 metadata array for received frames (READ)
 *   txq_hdrs[slot]:	 metadata array for frames to be sent (WRITE)
 *   ctrlq_cmd:		 command to be sent via ctrl vq (WRITE)
 *   ctrlq_status:	 return value for a command via ctrl vq (READ)
 *   ctrlq_rx:		 parameter for a VIRTIO_NET_CTRL_RX class command
 *			 (WRITE)
 *   ctrlq_mac_tbl_uc:	 unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 *   ctrlq_mac_tbl_mc:	 multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 * ctrlq_* structures are allocated only once each; they are protected by
 * the ctrlq_inuse variable and the ctrlq_wait condvar.
 */
/*
 * dynamically allocated memory is used for:
 *   rxq_hdr_dmamaps[slot]:	bus_dmamap_t array for rxq_hdrs[slot]
 *   txq_hdr_dmamaps[slot]:	bus_dmamap_t array for txq_hdrs[slot]
 *   rxq_dmamaps[slot]:		bus_dmamap_t array for received payload
 *   txq_dmamaps[slot]:		bus_dmamap_t array for sent payload
 *   rxq_mbufs[slot]:		mbuf pointer array for received frames
 *   txq_mbufs[slot]:		mbuf pointer array for sent frames
 */
static int |
static int |
vioif_alloc_mems(struct vioif_softc *sc) |
vioif_alloc_mems(struct vioif_softc *sc) |
{ |
{ |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtio_softc *vsc = sc->sc_virtio; |
int allocsize, allocsize2, r, rsegs, i; |
struct vioif_txqueue *txq; |
|
struct vioif_rxqueue *rxq; |
|
struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; |
|
int allocsize, allocsize2, r, rsegs, i, qid; |
void *vaddr; |
void *vaddr; |
intptr_t p; |
intptr_t p; |
int rxqsize, txqsize; |
|
|
|
rxqsize = sc->sc_vq[VQ_RX].vq_num; |
|
txqsize = sc->sc_vq[VQ_TX].vq_num; |
|
|
|
allocsize = sizeof(struct virtio_net_hdr) * rxqsize; |
allocsize = 0; |
allocsize += sizeof(struct virtio_net_hdr) * txqsize; |
for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) { |
|
rxq = &sc->sc_rxq[qid]; |
|
txq = &sc->sc_txq[qid]; |
|
|
|
allocsize += |
|
sizeof(struct virtio_net_hdr) * rxq->rxq_vq->vq_num; |
|
allocsize += |
|
sizeof(struct virtio_net_hdr) * txq->txq_vq->vq_num; |
|
} |
if (sc->sc_has_ctrl) { |
if (sc->sc_has_ctrl) { |
allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1; |
allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1; |
allocsize += sizeof(struct virtio_net_ctrl_status) * 1; |
allocsize += sizeof(struct virtio_net_ctrl_status) * 1; |
Line 339 vioif_alloc_mems(struct vioif_softc *sc) |
|
Line 472 vioif_alloc_mems(struct vioif_softc *sc) |
|
allocsize += sizeof(struct virtio_net_ctrl_mac_tbl) |
allocsize += sizeof(struct virtio_net_ctrl_mac_tbl) |
+ sizeof(struct virtio_net_ctrl_mac_tbl) |
+ sizeof(struct virtio_net_ctrl_mac_tbl) |
+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES; |
+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES; |
|
allocsize += sizeof(struct virtio_net_ctrl_mq) * 1; |
} |
} |
r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0, |
r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0, |
&sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT); |
&sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT); |
Line 357 vioif_alloc_mems(struct vioif_softc *sc) |
|
Line 491 vioif_alloc_mems(struct vioif_softc *sc) |
|
"error code %d\n", r); |
"error code %d\n", r); |
goto err_dmamem_alloc; |
goto err_dmamem_alloc; |
} |
} |
sc->sc_hdrs = vaddr; |
|
|
#define P(p, p0, p0size) do { p0 = (void *) p; \ |
|
p += p0size; } while (0) |
memset(vaddr, 0, allocsize); |
memset(vaddr, 0, allocsize); |
|
sc->sc_dmamem = vaddr; |
p = (intptr_t) vaddr; |
p = (intptr_t) vaddr; |
p += sizeof(struct virtio_net_hdr) * rxqsize; |
|
#define P(name,size) do { sc->sc_ ##name = (void*) p; \ |
for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) { |
p += size; } while (0) |
rxq = &sc->sc_rxq[qid]; |
P(tx_hdrs, sizeof(struct virtio_net_hdr) * txqsize); |
txq = &sc->sc_txq[qid]; |
|
|
|
P(p, rxq->rxq_hdrs, |
|
sizeof(rxq->rxq_hdrs[0]) * rxq->rxq_vq->vq_num); |
|
P(p, txq->txq_hdrs, |
|
sizeof(txq->txq_hdrs[0]) * txq->txq_vq->vq_num); |
|
} |
if (sc->sc_has_ctrl) { |
if (sc->sc_has_ctrl) { |
P(ctrl_cmd, sizeof(struct virtio_net_ctrl_cmd)); |
P(p, ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd)); |
P(ctrl_status, sizeof(struct virtio_net_ctrl_status)); |
P(p, ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status)); |
P(ctrl_rx, sizeof(struct virtio_net_ctrl_rx)); |
P(p, ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx)); |
P(ctrl_mac_tbl_uc, sizeof(struct virtio_net_ctrl_mac_tbl)); |
P(p, ctrlq->ctrlq_mac_tbl_uc, sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0); |
P(ctrl_mac_tbl_mc, |
P(p, ctrlq->ctrlq_mac_tbl_mc, |
(sizeof(struct virtio_net_ctrl_mac_tbl) |
(sizeof(*ctrlq->ctrlq_mac_tbl_mc) |
+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES)); |
+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES)); |
|
P(p, ctrlq->ctrlq_mq, sizeof(*ctrlq->ctrlq_mq)); |
|
} |
|
|
|
allocsize2 = 0; |
|
for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) { |
|
int rxqsize, txqsize; |
|
|
|
rxq = &sc->sc_rxq[qid]; |
|
txq = &sc->sc_txq[qid]; |
|
rxqsize = rxq->rxq_vq->vq_num; |
|
txqsize = txq->txq_vq->vq_num; |
|
|
|
allocsize2 += sizeof(rxq->rxq_dmamaps[0]) * rxqsize; |
|
allocsize2 += sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize; |
|
allocsize2 += sizeof(rxq->rxq_mbufs[0]) * rxqsize; |
|
|
|
allocsize2 += sizeof(txq->txq_dmamaps[0]) * txqsize; |
|
allocsize2 += sizeof(txq->txq_hdr_dmamaps[0]) * txqsize; |
|
allocsize2 += sizeof(txq->txq_mbufs[0]) * txqsize; |
} |
} |
#undef P |
vaddr = kmem_zalloc(allocsize2, KM_SLEEP); |
|
sc->sc_kmem = vaddr; |
|
p = (intptr_t) vaddr; |
|
|
allocsize2 = sizeof(bus_dmamap_t) * (rxqsize + txqsize); |
for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) { |
allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize); |
int rxqsize, txqsize; |
allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize); |
rxq = &sc->sc_rxq[qid]; |
sc->sc_arrays = kmem_zalloc(allocsize2, KM_SLEEP); |
txq = &sc->sc_txq[qid]; |
sc->sc_txhdr_dmamaps = sc->sc_arrays + rxqsize; |
rxqsize = rxq->rxq_vq->vq_num; |
sc->sc_rx_dmamaps = sc->sc_txhdr_dmamaps + txqsize; |
txqsize = txq->txq_vq->vq_num; |
sc->sc_tx_dmamaps = sc->sc_rx_dmamaps + rxqsize; |
|
sc->sc_rx_mbufs = (void*) (sc->sc_tx_dmamaps + txqsize); |
P(p, rxq->rxq_hdr_dmamaps, sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize); |
sc->sc_tx_mbufs = sc->sc_rx_mbufs + rxqsize; |
P(p, txq->txq_hdr_dmamaps, sizeof(txq->txq_hdr_dmamaps[0]) * txqsize); |
|
P(p, rxq->rxq_dmamaps, sizeof(rxq->rxq_dmamaps[0]) * rxqsize); |
|
P(p, txq->txq_dmamaps, sizeof(txq->txq_dmamaps[0]) * txqsize); |
|
P(p, rxq->rxq_mbufs, sizeof(rxq->rxq_mbufs[0]) * rxqsize); |
|
P(p, txq->txq_mbufs, sizeof(txq->txq_mbufs[0]) * txqsize); |
|
} |
|
#undef P |
|
|
#define C(map, buf, size, nsegs, rw, usage) \ |
#define C(map, size, nsegs, usage) \ |
do { \ |
do { \ |
r = bus_dmamap_create(virtio_dmat(vsc), size, nsegs, size, 0, \ |
r = bus_dmamap_create(virtio_dmat(vsc), size, nsegs, size, 0, \ |
BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, \ |
BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, \ |
&sc->sc_ ##map); \ |
&map); \ |
if (r != 0) { \ |
if (r != 0) { \ |
aprint_error_dev(sc->sc_dev, \ |
aprint_error_dev(sc->sc_dev, \ |
usage " dmamap creation failed, " \ |
"%s dmamap creation failed, " \ |
"error code %d\n", r); \ |
"error code %d\n", usage, r); \ |
goto err_reqs; \ |
goto err_reqs; \ |
} \ |
} \ |
} while (0) |
|
#define C_L1(map, buf, size, nsegs, rw, usage) \ |
|
C(map, buf, size, nsegs, rw, usage); \ |
|
do { \ |
|
r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ ##map, \ |
|
&sc->sc_ ##buf, size, NULL, \ |
|
BUS_DMA_ ##rw | BUS_DMA_NOWAIT); \ |
|
if (r != 0) { \ |
|
aprint_error_dev(sc->sc_dev, \ |
|
usage " dmamap load failed, " \ |
|
"error code %d\n", r); \ |
|
goto err_reqs; \ |
|
} \ |
|
} while (0) |
} while (0) |
#define C_L2(map, buf, size, nsegs, rw, usage) \ |
#define C_L(map, buf, size, nsegs, rw, usage) \ |
C(map, buf, size, nsegs, rw, usage); \ |
C(map, size, nsegs, usage); \ |
do { \ |
do { \ |
r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ ##map, \ |
r = bus_dmamap_load(virtio_dmat(vsc), map, \ |
sc->sc_ ##buf, size, NULL, \ |
buf, size, NULL, \ |
BUS_DMA_ ##rw | BUS_DMA_NOWAIT); \ |
rw | BUS_DMA_NOWAIT); \ |
if (r != 0) { \ |
if (r != 0) { \ |
aprint_error_dev(sc->sc_dev, \ |
aprint_error_dev(sc->sc_dev, \ |
usage " dmamap load failed, " \ |
usage " dmamap load failed, " \ |
Line 423 vioif_alloc_mems(struct vioif_softc *sc) |
|
Line 580 vioif_alloc_mems(struct vioif_softc *sc) |
|
goto err_reqs; \ |
goto err_reqs; \ |
} \ |
} \ |
} while (0) |
} while (0) |
for (i = 0; i < rxqsize; i++) { |
|
C_L1(rxhdr_dmamaps[i], rx_hdrs[i], |
|
sizeof(struct virtio_net_hdr), 1, |
|
READ, "rx header"); |
|
C(rx_dmamaps[i], NULL, MCLBYTES, 1, 0, "rx payload"); |
|
} |
|
|
|
for (i = 0; i < txqsize; i++) { |
for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) { |
C_L1(txhdr_dmamaps[i], tx_hdrs[i], |
rxq = &sc->sc_rxq[qid]; |
sizeof(struct virtio_net_hdr), 1, |
txq = &sc->sc_txq[qid]; |
WRITE, "tx header"); |
|
C(tx_dmamaps[i], NULL, ETHER_MAX_LEN, VIRTIO_NET_TX_MAXNSEGS, 0, |
for (i = 0; i < rxq->rxq_vq->vq_num; i++) { |
"tx payload"); |
C_L(rxq->rxq_hdr_dmamaps[i], &rxq->rxq_hdrs[i], |
|
sizeof(rxq->rxq_hdrs[0]), 1, |
|
BUS_DMA_READ, "rx header"); |
|
C(rxq->rxq_dmamaps[i], MCLBYTES, 1, "rx payload"); |
|
} |
|
|
|
for (i = 0; i < txq->txq_vq->vq_num; i++) { |
|
C_L(txq->txq_hdr_dmamaps[i], &txq->txq_hdrs[i], |
|
sizeof(txq->txq_hdrs[0]), 1, |
|
BUS_DMA_READ, "tx header"); |
|
C(txq->txq_dmamaps[i], ETHER_MAX_LEN, |
|
VIRTIO_NET_TX_MAXNSEGS, "tx payload"); |
|
} |
} |
} |
|
|
if (sc->sc_has_ctrl) { |
if (sc->sc_has_ctrl) { |
/* control vq class & command */ |
/* control vq class & command */ |
C_L2(ctrl_cmd_dmamap, ctrl_cmd, |
C_L(ctrlq->ctrlq_cmd_dmamap, |
sizeof(struct virtio_net_ctrl_cmd), 1, WRITE, |
ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1, |
"control command"); |
BUS_DMA_WRITE, "control command"); |
|
C_L(ctrlq->ctrlq_status_dmamap, |
/* control vq status */ |
ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1, |
C_L2(ctrl_status_dmamap, ctrl_status, |
BUS_DMA_READ, "control status"); |
sizeof(struct virtio_net_ctrl_status), 1, READ, |
|
"control status"); |
|
|
|
/* control vq rx mode command parameter */ |
/* control vq rx mode command parameter */ |
C_L2(ctrl_rx_dmamap, ctrl_rx, |
C_L(ctrlq->ctrlq_rx_dmamap, |
sizeof(struct virtio_net_ctrl_rx), 1, WRITE, |
ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx), 1, |
"rx mode control command"); |
BUS_DMA_WRITE, "rx mode control command"); |
|
|
|
/* multiqueue set command */ |
|
C_L(ctrlq->ctrlq_mq_dmamap, |
|
ctrlq->ctrlq_mq, sizeof(*ctrlq->ctrlq_mq), 1, |
|
BUS_DMA_WRITE, "multiqueue set command"); |
|
|
/* control vq MAC filter table for unicast */ |
/* control vq MAC filter table for unicast */ |
/* do not load now since its length is variable */ |
/* do not load now since its length is variable */ |
C(ctrl_tbl_uc_dmamap, NULL, |
C(ctrlq->ctrlq_tbl_uc_dmamap, |
sizeof(struct virtio_net_ctrl_mac_tbl) + 0, 1, WRITE, |
sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0, 1, |
"unicast MAC address filter command"); |
"unicast MAC address filter command"); |
|
|
/* control vq MAC filter table for multicast */ |
/* control vq MAC filter table for multicast */ |
C(ctrl_tbl_mc_dmamap, NULL, |
C(ctrlq->ctrlq_tbl_mc_dmamap, |
(sizeof(struct virtio_net_ctrl_mac_tbl) |
sizeof(*ctrlq->ctrlq_mac_tbl_mc) |
+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES), |
+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES, 1, |
1, WRITE, "multicast MAC address filter command"); |
"multicast MAC address filter command"); |
} |
} |
#undef C_L2 |
#undef C_L |
#undef C_L1 |
|
#undef C |
#undef C |
|
|
return 0; |
return 0; |
Line 475 vioif_alloc_mems(struct vioif_softc *sc) |
|
Line 640 vioif_alloc_mems(struct vioif_softc *sc) |
|
err_reqs: |
err_reqs: |
#define D(map) \ |
#define D(map) \ |
do { \ |
do { \ |
if (sc->sc_ ##map) { \ |
if (map) { \ |
bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_ ##map); \ |
bus_dmamap_destroy(virtio_dmat(vsc), map); \ |
sc->sc_ ##map = NULL; \ |
map = NULL; \ |
} \ |
} \ |
} while (0) |
} while (0) |
D(ctrl_tbl_mc_dmamap); |
D(ctrlq->ctrlq_tbl_mc_dmamap); |
D(ctrl_tbl_uc_dmamap); |
D(ctrlq->ctrlq_tbl_uc_dmamap); |
D(ctrl_rx_dmamap); |
D(ctrlq->ctrlq_rx_dmamap); |
D(ctrl_status_dmamap); |
D(ctrlq->ctrlq_status_dmamap); |
D(ctrl_cmd_dmamap); |
D(ctrlq->ctrlq_cmd_dmamap); |
for (i = 0; i < txqsize; i++) { |
for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) { |
D(tx_dmamaps[i]); |
rxq = &sc->sc_rxq[qid]; |
D(txhdr_dmamaps[i]); |
txq = &sc->sc_txq[qid]; |
} |
|
for (i = 0; i < rxqsize; i++) { |
for (i = 0; i < txq->txq_vq->vq_num; i++) { |
D(rx_dmamaps[i]); |
D(txq->txq_dmamaps[i]); |
D(rxhdr_dmamaps[i]); |
D(txq->txq_hdr_dmamaps[i]); |
|
} |
|
for (i = 0; i < rxq->rxq_vq->vq_num; i++) { |
|
D(rxq->rxq_dmamaps[i]); |
|
D(rxq->rxq_hdr_dmamaps[i]); |
|
} |
} |
} |
#undef D |
#undef D |
if (sc->sc_arrays) { |
if (sc->sc_kmem) { |
kmem_free(sc->sc_arrays, allocsize2); |
kmem_free(sc->sc_kmem, allocsize2); |
sc->sc_arrays = 0; |
sc->sc_kmem = NULL; |
} |
} |
bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_hdrs, allocsize); |
bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, allocsize); |
err_dmamem_alloc: |
err_dmamem_alloc: |
bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1); |
bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1); |
err_none: |
err_none: |
Line 510 vioif_attach(device_t parent, device_t s |
|
Line 680 vioif_attach(device_t parent, device_t s |
|
{ |
{ |
struct vioif_softc *sc = device_private(self); |
struct vioif_softc *sc = device_private(self); |
struct virtio_softc *vsc = device_private(parent); |
struct virtio_softc *vsc = device_private(parent); |
uint32_t features; |
struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; |
|
struct vioif_txqueue *txq; |
|
struct vioif_rxqueue *rxq; |
|
uint32_t features, req_features; |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
u_int flags; |
u_int softint_flags; |
int r, nvqs=0, req_flags; |
int r, i, nvqs=0, req_flags; |
|
|
if (virtio_child(vsc) != NULL) { |
if (virtio_child(vsc) != NULL) { |
aprint_normal(": child already attached for %s; " |
aprint_normal(": child already attached for %s; " |
Line 526 vioif_attach(device_t parent, device_t s |
|
Line 699 vioif_attach(device_t parent, device_t s |
|
sc->sc_virtio = vsc; |
sc->sc_virtio = vsc; |
sc->sc_link_active = false; |
sc->sc_link_active = false; |
|
|
|
sc->sc_max_nvq_pairs = 1; |
|
sc->sc_req_nvq_pairs = 1; |
|
sc->sc_act_nvq_pairs = 1; |
|
|
req_flags = 0; |
req_flags = 0; |
|
|
#ifdef VIOIF_MPSAFE |
#ifdef VIOIF_MPSAFE |
Line 536 vioif_attach(device_t parent, device_t s |
|
Line 713 vioif_attach(device_t parent, device_t s |
|
#endif |
#endif |
req_flags |= VIRTIO_F_PCI_INTR_MSIX; |
req_flags |= VIRTIO_F_PCI_INTR_MSIX; |
|
|
virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq, |
req_features = |
|
VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ | |
|
VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY; |
|
#ifdef VIOIF_MULTIQ |
|
req_features |= VIRTIO_NET_F_MQ; |
|
#endif |
|
virtio_child_attach_start(vsc, self, IPL_NET, NULL, |
vioif_config_change, virtio_vq_intr, req_flags, |
vioif_config_change, virtio_vq_intr, req_flags, |
(VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ | |
req_features, VIRTIO_NET_FLAG_BITS); |
VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY), |
|
VIRTIO_NET_FLAG_BITS); |
|
|
|
features = virtio_features(vsc); |
features = virtio_features(vsc); |
|
|
Line 586 vioif_attach(device_t parent, device_t s |
|
Line 767 vioif_attach(device_t parent, device_t s |
|
|
|
aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(sc->sc_mac)); |
aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(sc->sc_mac)); |
|
|
mutex_init(&sc->sc_tx_lock, MUTEX_DEFAULT, IPL_NET); |
if ((features & VIRTIO_NET_F_CTRL_VQ) && |
mutex_init(&sc->sc_rx_lock, MUTEX_DEFAULT, IPL_NET); |
(features & VIRTIO_NET_F_CTRL_RX)) { |
sc->sc_stopping = false; |
sc->sc_has_ctrl = true; |
|
|
/* |
cv_init(&ctrlq->ctrlq_wait, "ctrl_vq"); |
* Allocating a virtqueue for Rx |
mutex_init(&ctrlq->ctrlq_wait_lock, MUTEX_DEFAULT, IPL_NET); |
*/ |
ctrlq->ctrlq_inuse = FREE; |
r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_RX], VQ_RX, |
} else { |
MCLBYTES+sizeof(struct virtio_net_hdr), 2, "rx"); |
sc->sc_has_ctrl = false; |
|
} |
|
|
|
if (sc->sc_has_ctrl && (features & VIRTIO_NET_F_MQ)) { |
|
sc->sc_max_nvq_pairs = virtio_read_device_config_2(vsc, |
|
VIRTIO_NET_CONFIG_MAX_VQ_PAIRS); |
|
|
|
if (sc->sc_max_nvq_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) |
|
goto err; |
|
|
|
/* Limit the number of queue pairs to use */ |
|
sc->sc_req_nvq_pairs = MIN(sc->sc_max_nvq_pairs, ncpu); |
|
} |
|
|
|
r = vioif_alloc_queues(sc); |
if (r != 0) |
if (r != 0) |
goto err; |
goto err; |
nvqs = 1; |
|
sc->sc_vq[VQ_RX].vq_done = vioif_rx_vq_done; |
virtio_child_attach_set_vqs(vsc, sc->sc_vqs, sc->sc_req_nvq_pairs); |
|
|
|
#ifdef VIOIF_MPSAFE |
|
softint_flags = SOFTINT_NET | SOFTINT_MPSAFE; |
|
#else |
|
softint_flags = SOFTINT_NET; |
|
#endif |
|
|
/* |
/* |
* Allocating a virtqueue for Tx |
* Allocating a virtqueues |
*/ |
*/ |
r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_TX], VQ_TX, |
for (i = 0; i < sc->sc_max_nvq_pairs; i++) { |
(sizeof(struct virtio_net_hdr) + (ETHER_MAX_LEN - ETHER_HDR_LEN)), |
rxq = &sc->sc_rxq[i]; |
VIRTIO_NET_TX_MAXNSEGS + 1, "tx"); |
txq = &sc->sc_txq[i]; |
if (r != 0) |
char qname[32]; |
goto err; |
|
nvqs = 2; |
rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); |
sc->sc_vq[VQ_TX].vq_done = vioif_tx_vq_done; |
|
|
rxq->rxq_softint = softint_establish(softint_flags, vioif_rx_softint, rxq); |
virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_RX]); |
if (rxq->rxq_softint == NULL) { |
virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]); /* not urgent; do it later */ |
aprint_error_dev(self, "cannot establish rx softint\n"); |
|
goto err; |
|
} |
|
snprintf(qname, sizeof(qname), "rx%d", i); |
|
r = virtio_alloc_vq(vsc, rxq->rxq_vq, nvqs, |
|
MCLBYTES+sizeof(struct virtio_net_hdr), nvqs, qname); |
|
if (r != 0) |
|
goto err; |
|
nvqs++; |
|
rxq->rxq_vq->vq_done = vioif_rx_vq_done; |
|
rxq->rxq_vq->vq_done_ctx = (void *)rxq; |
|
rxq->rxq_stopping = true; |
|
|
|
txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); |
|
txq->txq_deferred_transmit = softint_establish(softint_flags, |
|
vioif_deferred_transmit, txq); |
|
if (txq->txq_deferred_transmit == NULL) { |
|
aprint_error_dev(self, "cannot establish tx softint\n"); |
|
goto err; |
|
} |
|
snprintf(qname, sizeof(qname), "tx%d", i); |
|
r = virtio_alloc_vq(vsc, txq->txq_vq, nvqs, |
|
(sizeof(struct virtio_net_hdr) + (ETHER_MAX_LEN - ETHER_HDR_LEN)), |
|
VIRTIO_NET_TX_MAXNSEGS + 1, qname); |
|
if (r != 0) |
|
goto err; |
|
nvqs++; |
|
txq->txq_vq->vq_done = vioif_tx_vq_done; |
|
txq->txq_vq->vq_done_ctx = (void *)txq; |
|
txq->txq_link_active = sc->sc_link_active; |
|
txq->txq_stopping = false; |
|
txq->txq_intrq = pcq_create(txq->txq_vq->vq_num, KM_NOSLEEP); |
|
if (txq->txq_intrq == NULL) |
|
goto err; |
|
} |
|
|
if ((features & VIRTIO_NET_F_CTRL_VQ) && |
if (sc->sc_has_ctrl) { |
(features & VIRTIO_NET_F_CTRL_RX)) { |
|
/* |
/* |
* Allocating a virtqueue for control channel |
* Allocating a virtqueue for control channel |
*/ |
*/ |
r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_CTRL], VQ_CTRL, |
r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, nvqs, |
NBPG, 1, "control"); |
NBPG, 1, "control"); |
if (r != 0) { |
if (r != 0) { |
aprint_error_dev(self, "failed to allocate " |
aprint_error_dev(self, "failed to allocate " |
"a virtqueue for control channel\n"); |
"a virtqueue for control channel\n"); |
goto skip; |
|
} |
|
|
|
sc->sc_vq[VQ_CTRL].vq_done = vioif_ctrl_vq_done; |
|
cv_init(&sc->sc_ctrl_wait, "ctrl_vq"); |
|
mutex_init(&sc->sc_ctrl_wait_lock, MUTEX_DEFAULT, IPL_NET); |
|
sc->sc_ctrl_inuse = FREE; |
|
virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]); |
|
sc->sc_has_ctrl = true; |
|
nvqs = 3; |
|
} |
|
skip: |
|
|
|
#ifdef VIOIF_MPSAFE |
sc->sc_has_ctrl = false; |
flags = SOFTINT_NET | SOFTINT_MPSAFE; |
cv_destroy(&ctrlq->ctrlq_wait); |
#else |
mutex_destroy(&ctrlq->ctrlq_wait_lock); |
flags = SOFTINT_NET; |
} else { |
#endif |
nvqs++; |
sc->sc_rx_softint = softint_establish(flags, vioif_rx_softint, sc); |
ctrlq->ctrlq_vq->vq_done = vioif_ctrl_vq_done; |
if (sc->sc_rx_softint == NULL) { |
ctrlq->ctrlq_vq->vq_done_ctx = (void *) ctrlq; |
aprint_error_dev(self, "cannot establish rx softint\n"); |
} |
goto err; |
|
} |
} |
|
|
sc->sc_ctl_softint = softint_establish(flags, vioif_ctl_softint, sc); |
sc->sc_ctl_softint = softint_establish(softint_flags, vioif_ctl_softint, sc); |
if (sc->sc_ctl_softint == NULL) { |
if (sc->sc_ctl_softint == NULL) { |
aprint_error_dev(self, "cannot establish ctl softint\n"); |
aprint_error_dev(self, "cannot establish ctl softint\n"); |
goto err; |
goto err; |
|
|
strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); |
strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); |
ifp->if_softc = sc; |
ifp->if_softc = sc; |
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
|
#ifdef VIOIF_MPSAFE |
|
ifp->if_extflags = IFEF_MPSAFE; |
|
#endif |
ifp->if_start = vioif_start; |
ifp->if_start = vioif_start; |
|
if (sc->sc_req_nvq_pairs > 1) |
|
ifp->if_transmit = vioif_transmit; |
ifp->if_ioctl = vioif_ioctl; |
ifp->if_ioctl = vioif_ioctl; |
ifp->if_init = vioif_init; |
ifp->if_init = vioif_init; |
ifp->if_stop = vioif_stop; |
ifp->if_stop = vioif_stop; |
ifp->if_capabilities = 0; |
ifp->if_capabilities = 0; |
ifp->if_watchdog = vioif_watchdog; |
ifp->if_watchdog = vioif_watchdog; |
IFQ_SET_MAXLEN(&ifp->if_snd, MAX(sc->sc_vq[VQ_TX].vq_num, IFQ_MAXLEN)); |
txq = &sc->sc_txq[0]; |
|
IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq->txq_vq->vq_num, IFQ_MAXLEN)); |
IFQ_SET_READY(&ifp->if_snd); |
IFQ_SET_READY(&ifp->if_snd); |
|
|
sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
|
|
return; |
return; |
|
|
err: |
err: |
mutex_destroy(&sc->sc_tx_lock); |
for (i = 0; i < sc->sc_max_nvq_pairs; i++) { |
mutex_destroy(&sc->sc_rx_lock); |
rxq = &sc->sc_rxq[i]; |
|
txq = &sc->sc_txq[i]; |
|
|
|
if (rxq->rxq_lock) { |
|
mutex_obj_free(rxq->rxq_lock); |
|
rxq->rxq_lock = NULL; |
|
} |
|
|
|
if (rxq->rxq_softint) { |
|
softint_disestablish(rxq->rxq_softint); |
|
rxq->rxq_softint = NULL; |
|
} |
|
|
|
if (txq->txq_lock) { |
|
mutex_obj_free(txq->txq_lock); |
|
txq->txq_lock = NULL; |
|
} |
|
|
|
if (txq->txq_deferred_transmit) { |
|
softint_disestablish(txq->txq_deferred_transmit); |
|
txq->txq_deferred_transmit = NULL; |
|
} |
|
|
|
if (txq->txq_intrq) { |
|
pcq_destroy(txq->txq_intrq); |
|
txq->txq_intrq = NULL; |
|
} |
|
} |
|
|
if (sc->sc_has_ctrl) { |
if (sc->sc_has_ctrl) { |
cv_destroy(&sc->sc_ctrl_wait); |
cv_destroy(&ctrlq->ctrlq_wait); |
mutex_destroy(&sc->sc_ctrl_wait_lock); |
mutex_destroy(&ctrlq->ctrlq_wait_lock); |
} |
} |
|
|
while (nvqs > 0) |
while (nvqs > 0) |
virtio_free_vq(vsc, &sc->sc_vq[--nvqs]); |
virtio_free_vq(vsc, &sc->sc_vqs[--nvqs]); |
|
|
|
vioif_free_queues(sc); |
|
|
virtio_child_attach_failed(vsc); |
virtio_child_attach_failed(vsc); |
return; |
return; |
Line 713 vioif_deferred_init(device_t self) |
|
Line 969 vioif_deferred_init(device_t self) |
|
"errror code %d\n", r); |
"errror code %d\n", r); |
} |
} |
|
|
|
static void |
|
vioif_enable_interrupt_vqpairs(struct vioif_softc *sc) |
|
{ |
|
struct virtio_softc *vsc = sc->sc_virtio; |
|
struct vioif_txqueue *txq; |
|
struct vioif_rxqueue *rxq; |
|
int i; |
|
|
|
for (i = 0; i < sc->sc_act_nvq_pairs; i++) { |
|
txq = &sc->sc_txq[i]; |
|
rxq = &sc->sc_rxq[i]; |
|
|
|
virtio_start_vq_intr(vsc, txq->txq_vq); |
|
virtio_start_vq_intr(vsc, rxq->rxq_vq); |
|
} |
|
} |
|
|
|
static void |
|
vioif_disable_interrupt_vqpairs(struct vioif_softc *sc) |
|
{ |
|
struct virtio_softc *vsc = sc->sc_virtio; |
|
struct vioif_txqueue *txq; |
|
struct vioif_rxqueue *rxq; |
|
int i; |
|
|
|
for (i = 0; i < sc->sc_act_nvq_pairs; i++) { |
|
txq = &sc->sc_txq[i]; |
|
rxq = &sc->sc_rxq[i]; |
|
|
|
virtio_stop_vq_intr(vsc, txq->txq_vq); |
|
virtio_stop_vq_intr(vsc, rxq->rxq_vq); |
|
} |
|
} |
|
|
/* |
/* |
* Interface functions for ifnet |
* Interface functions for ifnet |
*/ |
*/ |
Line 721 vioif_init(struct ifnet *ifp) |
|
Line 1011 vioif_init(struct ifnet *ifp) |
|
{ |
{ |
struct vioif_softc *sc = ifp->if_softc; |
struct vioif_softc *sc = ifp->if_softc; |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtio_softc *vsc = sc->sc_virtio; |
|
struct vioif_rxqueue *rxq; |
|
struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; |
|
int r, i; |
|
|
vioif_stop(ifp, 0); |
vioif_stop(ifp, 0); |
|
|
virtio_reinit_start(vsc); |
virtio_reinit_start(vsc); |
virtio_negotiate_features(vsc, virtio_features(vsc)); |
virtio_negotiate_features(vsc, virtio_features(vsc)); |
virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_RX]); |
|
virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]); |
for (i = 0; i < sc->sc_req_nvq_pairs; i++) { |
if (sc->sc_has_ctrl) |
rxq = &sc->sc_rxq[i]; |
virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]); |
|
|
/* Have to set false before vioif_populate_rx_mbufs */ |
|
rxq->rxq_stopping = false; |
|
vioif_populate_rx_mbufs(rxq); |
|
} |
|
|
virtio_reinit_end(vsc); |
virtio_reinit_end(vsc); |
|
virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq); |
|
|
|
r = vioif_ctrl_mq_vq_pairs_set(sc, sc->sc_req_nvq_pairs); |
|
if (r == 0) |
|
sc->sc_act_nvq_pairs = sc->sc_req_nvq_pairs; |
|
else |
|
sc->sc_act_nvq_pairs = 1; |
|
|
|
for (i = 0; i < sc->sc_act_nvq_pairs; i++) |
|
sc->sc_txq[i].txq_stopping = false; |
|
|
|
vioif_enable_interrupt_vqpairs(sc); |
|
|
if (!sc->sc_deferred_init_done) { |
if (!sc->sc_deferred_init_done) { |
sc->sc_deferred_init_done = 1; |
sc->sc_deferred_init_done = 1; |
Line 738 vioif_init(struct ifnet *ifp) |
|
Line 1048 vioif_init(struct ifnet *ifp) |
|
vioif_deferred_init(sc->sc_dev); |
vioif_deferred_init(sc->sc_dev); |
} |
} |
|
|
/* Have to set false before vioif_populate_rx_mbufs */ |
|
sc->sc_stopping = false; |
|
|
|
vioif_populate_rx_mbufs(sc); |
|
|
|
vioif_update_link_status(sc); |
vioif_update_link_status(sc); |
ifp->if_flags |= IFF_RUNNING; |
ifp->if_flags |= IFF_RUNNING; |
ifp->if_flags &= ~IFF_OACTIVE; |
ifp->if_flags &= ~IFF_OACTIVE; |
Line 756 vioif_stop(struct ifnet *ifp, int disabl |
|
Line 1061 vioif_stop(struct ifnet *ifp, int disabl |
|
{ |
{ |
struct vioif_softc *sc = ifp->if_softc; |
struct vioif_softc *sc = ifp->if_softc; |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtio_softc *vsc = sc->sc_virtio; |
|
struct vioif_txqueue *txq; |
|
struct vioif_rxqueue *rxq; |
|
struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; |
|
int i; |
|
|
/* Take the locks to ensure that ongoing TX/RX finish */ |
/* Take the locks to ensure that ongoing TX/RX finish */ |
VIOIF_TX_LOCK(sc); |
for (i = 0; i < sc->sc_act_nvq_pairs; i++) { |
VIOIF_RX_LOCK(sc); |
txq = &sc->sc_txq[i]; |
sc->sc_stopping = true; |
rxq = &sc->sc_rxq[i]; |
VIOIF_RX_UNLOCK(sc); |
|
VIOIF_TX_UNLOCK(sc); |
mutex_enter(txq->txq_lock); |
|
txq->txq_stopping = true; |
|
mutex_exit(txq->txq_lock); |
|
|
|
mutex_enter(rxq->rxq_lock); |
|
rxq->rxq_stopping = true; |
|
mutex_exit(rxq->rxq_lock); |
|
} |
|
|
/* disable interrupts */ |
/* disable interrupts */ |
virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_RX]); |
vioif_disable_interrupt_vqpairs(sc); |
virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]); |
|
if (sc->sc_has_ctrl) |
if (sc->sc_has_ctrl) |
virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]); |
virtio_stop_vq_intr(vsc, ctrlq->ctrlq_vq); |
|
|
/* only way to stop I/O and DMA is resetting... */ |
/* only way to stop I/O and DMA is resetting... */ |
virtio_reset(vsc); |
virtio_reset(vsc); |
vioif_rx_deq(sc); |
for (i = 0; i < sc->sc_act_nvq_pairs; i++) |
vioif_tx_drain(sc); |
vioif_rx_deq(&sc->sc_rxq[i]); |
|
|
ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
sc->sc_link_active = false; |
sc->sc_link_active = false; |
|
|
if (disable) |
for (i = 0; i < sc->sc_act_nvq_pairs; i++) { |
vioif_rx_drain(sc); |
txq = &sc->sc_txq[i]; |
|
rxq = &sc->sc_rxq[i]; |
|
|
|
txq->txq_link_active = false; |
|
|
|
if (disable) |
|
vioif_rx_drain(rxq); |
|
|
|
vioif_tx_drain(txq); |
|
} |
} |
} |
|
|
static void |
static void |
vioif_start(struct ifnet *ifp) |
vioif_send_common_locked(struct ifnet *ifp, struct vioif_txqueue *txq, bool is_transmit) |
{ |
{ |
struct vioif_softc *sc = ifp->if_softc; |
struct vioif_softc *sc = ifp->if_softc; |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtqueue *vq = &sc->sc_vq[VQ_TX]; |
struct virtqueue *vq = txq->txq_vq; |
struct mbuf *m; |
struct mbuf *m; |
int queued = 0; |
int queued = 0; |
|
|
VIOIF_TX_LOCK(sc); |
KASSERT(mutex_owned(txq->txq_lock)); |
|
|
if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING || |
if ((ifp->if_flags & IFF_RUNNING) == 0) |
!sc->sc_link_active) |
return; |
goto out; |
|
|
|
if (sc->sc_stopping) |
if (!txq->txq_link_active || txq->txq_stopping) |
goto out; |
return; |
|
|
|
if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit) |
|
return; |
|
|
for (;;) { |
for (;;) { |
int slot, r; |
int slot, r; |
|
|
IFQ_DEQUEUE(&ifp->if_snd, m); |
if (is_transmit) |
|
m = pcq_get(txq->txq_intrq); |
|
else |
|
IFQ_DEQUEUE(&ifp->if_snd, m); |
|
|
if (m == NULL) |
if (m == NULL) |
break; |
break; |
|
|
Line 816 vioif_start(struct ifnet *ifp) |
|
Line 1148 vioif_start(struct ifnet *ifp) |
|
panic("enqueue_prep for a tx buffer"); |
panic("enqueue_prep for a tx buffer"); |
|
|
r = bus_dmamap_load_mbuf(virtio_dmat(vsc), |
r = bus_dmamap_load_mbuf(virtio_dmat(vsc), |
sc->sc_tx_dmamaps[slot], |
txq->txq_dmamaps[slot], |
m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
if (r != 0) { |
if (r != 0) { |
/* maybe just too fragmented */ |
/* maybe just too fragmented */ |
Line 831 vioif_start(struct ifnet *ifp) |
|
Line 1163 vioif_start(struct ifnet *ifp) |
|
|
|
m = newm; |
m = newm; |
r = bus_dmamap_load_mbuf(virtio_dmat(vsc), |
r = bus_dmamap_load_mbuf(virtio_dmat(vsc), |
sc->sc_tx_dmamaps[slot], |
txq->txq_dmamaps[slot], |
m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
if (r != 0) { |
if (r != 0) { |
aprint_error_dev(sc->sc_dev, |
aprint_error_dev(sc->sc_dev, |
|
|
|
|
/* This should actually never fail */ |
/* This should actually never fail */ |
r = virtio_enqueue_reserve(vsc, vq, slot, |
r = virtio_enqueue_reserve(vsc, vq, slot, |
sc->sc_tx_dmamaps[slot]->dm_nsegs + 1); |
txq->txq_dmamaps[slot]->dm_nsegs + 1); |
if (r != 0) { |
if (r != 0) { |
aprint_error_dev(sc->sc_dev, |
aprint_error_dev(sc->sc_dev, |
"virtio_enqueue_reserve failed, error code %d\n", |
"virtio_enqueue_reserve failed, error code %d\n", |
r); |
r); |
bus_dmamap_unload(virtio_dmat(vsc), |
bus_dmamap_unload(virtio_dmat(vsc), |
sc->sc_tx_dmamaps[slot]); |
txq->txq_dmamaps[slot]); |
/* slot already freed by virtio_enqueue_reserve */ |
/* slot already freed by virtio_enqueue_reserve */ |
m_freem(m); |
m_freem(m); |
continue; |
continue; |
} |
} |
|
|
sc->sc_tx_mbufs[slot] = m; |
txq->txq_mbufs[slot] = m; |
|
|
memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr)); |
memset(&txq->txq_hdrs[slot], 0, sizeof(struct virtio_net_hdr)); |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot], |
bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot], |
0, sc->sc_tx_dmamaps[slot]->dm_mapsize, |
0, txq->txq_dmamaps[slot]->dm_mapsize, |
BUS_DMASYNC_PREWRITE); |
BUS_DMASYNC_PREWRITE); |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_txhdr_dmamaps[slot], |
bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot], |
0, sc->sc_txhdr_dmamaps[slot]->dm_mapsize, |
0, txq->txq_hdr_dmamaps[slot]->dm_mapsize, |
BUS_DMASYNC_PREWRITE); |
BUS_DMASYNC_PREWRITE); |
virtio_enqueue(vsc, vq, slot, sc->sc_txhdr_dmamaps[slot], true); |
virtio_enqueue(vsc, vq, slot, txq->txq_hdr_dmamaps[slot], true); |
virtio_enqueue(vsc, vq, slot, sc->sc_tx_dmamaps[slot], true); |
virtio_enqueue(vsc, vq, slot, txq->txq_dmamaps[slot], true); |
virtio_enqueue_commit(vsc, vq, slot, false); |
virtio_enqueue_commit(vsc, vq, slot, false); |
|
|
queued++; |
queued++; |
|
|
virtio_enqueue_commit(vsc, vq, -1, true); |
virtio_enqueue_commit(vsc, vq, -1, true); |
ifp->if_timer = 5; |
ifp->if_timer = 5; |
} |
} |
|
} |
|
|
out: |
static void |
VIOIF_TX_UNLOCK(sc); |
vioif_start_locked(struct ifnet *ifp, struct vioif_txqueue *txq) |
|
{ |
|
|
|
/* |
|
* ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c. |
|
*/ |
|
vioif_send_common_locked(ifp, txq, false); |
|
|
|
} |
|
|
|
static void |
|
vioif_start(struct ifnet *ifp) |
|
{ |
|
struct vioif_softc *sc = ifp->if_softc; |
|
struct vioif_txqueue *txq = &sc->sc_txq[0]; |
|
|
|
#ifdef VIOIF_MPSAFE |
|
KASSERT(if_is_mpsafe(ifp)); |
|
#endif |
|
|
|
mutex_enter(txq->txq_lock); |
|
if (!txq->txq_stopping) |
|
vioif_start_locked(ifp, txq); |
|
mutex_exit(txq->txq_lock); |
|
} |
|
|
|
static inline int |
|
vioif_select_txqueue(struct ifnet *ifp, struct mbuf *m) |
|
{ |
|
struct vioif_softc *sc = ifp->if_softc; |
|
u_int cpuid = cpu_index(curcpu()); |
|
|
|
return cpuid % sc->sc_act_nvq_pairs; |
|
} |
|
|
|
static void |
|
vioif_transmit_locked(struct ifnet *ifp, struct vioif_txqueue *txq) |
|
{ |
|
|
|
vioif_send_common_locked(ifp, txq, true); |
|
} |
|
|
|
static int |
|
vioif_transmit(struct ifnet *ifp, struct mbuf *m) |
|
{ |
|
struct vioif_softc *sc = ifp->if_softc; |
|
struct vioif_txqueue *txq; |
|
int qid; |
|
|
|
qid = vioif_select_txqueue(ifp, m); |
|
txq = &sc->sc_txq[qid]; |
|
|
|
if (__predict_false(!pcq_put(txq->txq_intrq, m))) { |
|
m_freem(m); |
|
return ENOBUFS; |
|
} |
|
|
|
ifp->if_obytes += m->m_pkthdr.len; |
|
if (m->m_flags & M_MCAST) |
|
ifp->if_omcasts++; |
|
|
|
if (mutex_tryenter(txq->txq_lock)) { |
|
if (!txq->txq_stopping) |
|
vioif_transmit_locked(ifp, txq); |
|
mutex_exit(txq->txq_lock); |
|
} |
|
|
|
return 0; |
|
} |
|
|
|
static void |
|
vioif_deferred_transmit(void *arg) |
|
{ |
|
struct vioif_txqueue *txq = arg; |
|
struct virtio_softc *vsc = txq->txq_vq->vq_owner; |
|
struct vioif_softc *sc = device_private(virtio_child(vsc)); |
|
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
|
|
|
if (mutex_tryenter(txq->txq_lock)) { |
|
vioif_send_common_locked(ifp, txq, true); |
|
mutex_exit(txq->txq_lock); |
|
} |
} |
} |
|
|
static int |
static int |
|
|
vioif_watchdog(struct ifnet *ifp) |
vioif_watchdog(struct ifnet *ifp) |
{ |
{ |
struct vioif_softc *sc = ifp->if_softc; |
struct vioif_softc *sc = ifp->if_softc; |
|
int i; |
|
|
if (ifp->if_flags & IFF_RUNNING) |
if (ifp->if_flags & IFF_RUNNING) { |
vioif_tx_vq_done(&sc->sc_vq[VQ_TX]); |
for (i = 0; i < sc->sc_act_nvq_pairs; i++) |
|
vioif_tx_vq_done(sc->sc_txq[i].txq_vq); |
|
} |
} |
} |
|
|
|
|
Line 920 vioif_watchdog(struct ifnet *ifp) |
|
Line 1337 vioif_watchdog(struct ifnet *ifp) |
|
*/ |
*/ |
/* allocate and initialize a mbuf for receive */ |
/* allocate and initialize a mbuf for receive */ |
static int |
static int |
vioif_add_rx_mbuf(struct vioif_softc *sc, int i) |
vioif_add_rx_mbuf(struct vioif_rxqueue *rxq, int i) |
{ |
{ |
|
struct virtio_softc *vsc = rxq->rxq_vq->vq_owner; |
struct mbuf *m; |
struct mbuf *m; |
int r; |
int r; |
|
|
Line 933 vioif_add_rx_mbuf(struct vioif_softc *sc |
|
Line 1351 vioif_add_rx_mbuf(struct vioif_softc *sc |
|
m_freem(m); |
m_freem(m); |
return ENOBUFS; |
return ENOBUFS; |
} |
} |
sc->sc_rx_mbufs[i] = m; |
rxq->rxq_mbufs[i] = m; |
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; |
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; |
r = bus_dmamap_load_mbuf(virtio_dmat(sc->sc_virtio), |
r = bus_dmamap_load_mbuf(virtio_dmat(vsc), |
sc->sc_rx_dmamaps[i], |
rxq->rxq_dmamaps[i], |
m, BUS_DMA_READ|BUS_DMA_NOWAIT); |
m, BUS_DMA_READ|BUS_DMA_NOWAIT); |
if (r) { |
if (r) { |
m_freem(m); |
m_freem(m); |
sc->sc_rx_mbufs[i] = 0; |
rxq->rxq_mbufs[i] = NULL; |
return r; |
return r; |
} |
} |
|
|
Line 949 vioif_add_rx_mbuf(struct vioif_softc *sc |
|
Line 1367 vioif_add_rx_mbuf(struct vioif_softc *sc |
|
|
|
/* free a mbuf for receive */ |
/* free a mbuf for receive */ |
static void |
static void |
vioif_free_rx_mbuf(struct vioif_softc *sc, int i) |
vioif_free_rx_mbuf(struct vioif_rxqueue *rxq, int i) |
{ |
{ |
bus_dmamap_unload(virtio_dmat(sc->sc_virtio), sc->sc_rx_dmamaps[i]); |
struct virtio_softc *vsc = rxq->rxq_vq->vq_owner; |
m_freem(sc->sc_rx_mbufs[i]); |
|
sc->sc_rx_mbufs[i] = NULL; |
bus_dmamap_unload(virtio_dmat(vsc), rxq->rxq_dmamaps[i]); |
|
m_freem(rxq->rxq_mbufs[i]); |
|
rxq->rxq_mbufs[i] = NULL; |
} |
} |
|
|
/* add mbufs for all the empty receive slots */ |
/* add mbufs for all the empty receive slots */ |
static void |
static void |
vioif_populate_rx_mbufs(struct vioif_softc *sc) |
vioif_populate_rx_mbufs(struct vioif_rxqueue *rxq) |
{ |
{ |
VIOIF_RX_LOCK(sc); |
|
vioif_populate_rx_mbufs_locked(sc); |
mutex_enter(rxq->rxq_lock); |
VIOIF_RX_UNLOCK(sc); |
vioif_populate_rx_mbufs_locked(rxq); |
|
mutex_exit(rxq->rxq_lock); |
} |
} |
|
|
static void |
static void |
vioif_populate_rx_mbufs_locked(struct vioif_softc *sc) |
vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *rxq) |
{ |
{ |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtqueue *vq = rxq->rxq_vq; |
|
struct virtio_softc *vsc = vq->vq_owner; |
|
struct vioif_softc *sc = device_private(virtio_child(vsc)); |
int i, r, ndone = 0; |
int i, r, ndone = 0; |
struct virtqueue *vq = &sc->sc_vq[VQ_RX]; |
|
|
|
KASSERT(VIOIF_RX_LOCKED(sc)); |
KASSERT(mutex_owned(rxq->rxq_lock)); |
|
|
if (sc->sc_stopping) |
if (rxq->rxq_stopping) |
return; |
return; |
|
|
for (i = 0; i < vq->vq_num; i++) { |
for (i = 0; i < vq->vq_num; i++) { |
Line 984 vioif_populate_rx_mbufs_locked(struct vi |
|
Line 1406 vioif_populate_rx_mbufs_locked(struct vi |
|
break; |
break; |
if (r != 0) |
if (r != 0) |
panic("enqueue_prep for rx buffers"); |
panic("enqueue_prep for rx buffers"); |
if (sc->sc_rx_mbufs[slot] == NULL) { |
if (rxq->rxq_mbufs[slot] == NULL) { |
r = vioif_add_rx_mbuf(sc, slot); |
r = vioif_add_rx_mbuf(rxq, slot); |
if (r != 0) { |
if (r != 0) { |
printf("%s: rx mbuf allocation failed, " |
printf("%s: rx mbuf allocation failed, " |
"error code %d\n", |
"error code %d\n", |
Line 994 vioif_populate_rx_mbufs_locked(struct vi |
|
Line 1416 vioif_populate_rx_mbufs_locked(struct vi |
|
} |
} |
} |
} |
r = virtio_enqueue_reserve(vsc, vq, slot, |
r = virtio_enqueue_reserve(vsc, vq, slot, |
sc->sc_rx_dmamaps[slot]->dm_nsegs + 1); |
rxq->rxq_dmamaps[slot]->dm_nsegs + 1); |
if (r != 0) { |
if (r != 0) { |
vioif_free_rx_mbuf(sc, slot); |
vioif_free_rx_mbuf(rxq, slot); |
break; |
break; |
} |
} |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rxhdr_dmamaps[slot], |
bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot], |
0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD); |
0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD); |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot], |
bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot], |
0, MCLBYTES, BUS_DMASYNC_PREREAD); |
0, MCLBYTES, BUS_DMASYNC_PREREAD); |
virtio_enqueue(vsc, vq, slot, sc->sc_rxhdr_dmamaps[slot], false); |
virtio_enqueue(vsc, vq, slot, rxq->rxq_hdr_dmamaps[slot], false); |
virtio_enqueue(vsc, vq, slot, sc->sc_rx_dmamaps[slot], false); |
virtio_enqueue(vsc, vq, slot, rxq->rxq_dmamaps[slot], false); |
virtio_enqueue_commit(vsc, vq, slot, false); |
virtio_enqueue_commit(vsc, vq, slot, false); |
ndone++; |
ndone++; |
} |
} |
Line 1014 vioif_populate_rx_mbufs_locked(struct vi |
|
Line 1436 vioif_populate_rx_mbufs_locked(struct vi |
|
|
|
/* dequeue received packets */ |
/* dequeue received packets */ |
static int |
static int |
vioif_rx_deq(struct vioif_softc *sc) |
vioif_rx_deq(struct vioif_rxqueue *rxq) |
{ |
{ |
int r; |
int r; |
|
|
KASSERT(sc->sc_stopping); |
KASSERT(rxq->rxq_stopping); |
|
|
VIOIF_RX_LOCK(sc); |
mutex_enter(rxq->rxq_lock); |
r = vioif_rx_deq_locked(sc); |
r = vioif_rx_deq_locked(rxq); |
VIOIF_RX_UNLOCK(sc); |
mutex_exit(rxq->rxq_lock); |
|
|
return r; |
return r; |
} |
} |
|
|
/* dequeue received packets */ |
/* dequeue received packets */ |
static int |
static int |
vioif_rx_deq_locked(struct vioif_softc *sc) |
vioif_rx_deq_locked(struct vioif_rxqueue *rxq) |
{ |
{ |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtqueue *vq = rxq->rxq_vq; |
struct virtqueue *vq = &sc->sc_vq[VQ_RX]; |
struct virtio_softc *vsc = vq->vq_owner; |
|
struct vioif_softc *sc = device_private(virtio_child(vsc)); |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
struct mbuf *m; |
struct mbuf *m; |
int r = 0; |
int r = 0; |
int slot, len; |
int slot, len; |
|
|
KASSERT(VIOIF_RX_LOCKED(sc)); |
KASSERT(mutex_owned(rxq->rxq_lock)); |
|
|
while (virtio_dequeue(vsc, vq, &slot, &len) == 0) { |
while (virtio_dequeue(vsc, vq, &slot, &len) == 0) { |
len -= sizeof(struct virtio_net_hdr); |
len -= sizeof(struct virtio_net_hdr); |
r = 1; |
r = 1; |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rxhdr_dmamaps[slot], |
bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot], |
0, sizeof(struct virtio_net_hdr), |
0, sizeof(struct virtio_net_hdr), |
BUS_DMASYNC_POSTREAD); |
BUS_DMASYNC_POSTREAD); |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot], |
bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot], |
0, MCLBYTES, |
0, MCLBYTES, |
BUS_DMASYNC_POSTREAD); |
BUS_DMASYNC_POSTREAD); |
m = sc->sc_rx_mbufs[slot]; |
m = rxq->rxq_mbufs[slot]; |
KASSERT(m != NULL); |
KASSERT(m != NULL); |
bus_dmamap_unload(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot]); |
bus_dmamap_unload(virtio_dmat(vsc), rxq->rxq_dmamaps[slot]); |
sc->sc_rx_mbufs[slot] = 0; |
rxq->rxq_mbufs[slot] = NULL; |
virtio_dequeue_commit(vsc, vq, slot); |
virtio_dequeue_commit(vsc, vq, slot); |
m_set_rcvif(m, ifp); |
m_set_rcvif(m, ifp); |
m->m_len = m->m_pkthdr.len = len; |
m->m_len = m->m_pkthdr.len = len; |
|
|
VIOIF_RX_UNLOCK(sc); |
mutex_exit(rxq->rxq_lock); |
if_percpuq_enqueue(ifp->if_percpuq, m); |
if_percpuq_enqueue(ifp->if_percpuq, m); |
VIOIF_RX_LOCK(sc); |
mutex_enter(rxq->rxq_lock); |
|
|
if (sc->sc_stopping) |
if (rxq->rxq_stopping) |
break; |
break; |
} |
} |
|
|
Line 1072 vioif_rx_deq_locked(struct vioif_softc * |
|
Line 1495 vioif_rx_deq_locked(struct vioif_softc * |
|
static int |
static int |
vioif_rx_vq_done(struct virtqueue *vq) |
vioif_rx_vq_done(struct virtqueue *vq) |
{ |
{ |
struct virtio_softc *vsc = vq->vq_owner; |
struct vioif_rxqueue *rxq = vq->vq_done_ctx; |
struct vioif_softc *sc = device_private(virtio_child(vsc)); |
|
int r = 0; |
int r = 0; |
|
|
#ifdef VIOIF_SOFTINT_INTR |
#ifdef VIOIF_SOFTINT_INTR |
KASSERT(!cpu_intr_p()); |
KASSERT(!cpu_intr_p()); |
#endif |
#endif |
|
|
VIOIF_RX_LOCK(sc); |
mutex_enter(rxq->rxq_lock); |
|
|
if (sc->sc_stopping) |
if (rxq->rxq_stopping) |
goto out; |
goto out; |
|
|
r = vioif_rx_deq_locked(sc); |
r = vioif_rx_deq_locked(rxq); |
if (r) |
if (r) |
#ifdef VIOIF_SOFTINT_INTR |
#ifdef VIOIF_SOFTINT_INTR |
vioif_populate_rx_mbufs_locked(sc); |
vioif_populate_rx_mbufs_locked(rxq); |
#else |
#else |
softint_schedule(sc->sc_rx_softint); |
softint_schedule(rxq->rxq_softint); |
#endif |
#endif |
|
|
out: |
out: |
VIOIF_RX_UNLOCK(sc); |
mutex_exit(rxq->rxq_lock); |
return r; |
return r; |
} |
} |
|
|
|
|
static void |
static void |
vioif_rx_softint(void *arg) |
vioif_rx_softint(void *arg) |
{ |
{ |
struct vioif_softc *sc = arg; |
struct vioif_rxqueue *rxq = arg; |
|
|
vioif_populate_rx_mbufs(sc); |
vioif_populate_rx_mbufs(rxq); |
} |
} |
|
|
/* free all the mbufs; called from if_stop(disable) */ |
/* free all the mbufs; called from if_stop(disable) */ |
static void |
static void |
vioif_rx_drain(struct vioif_softc *sc) |
vioif_rx_drain(struct vioif_rxqueue *rxq) |
{ |
{ |
struct virtqueue *vq = &sc->sc_vq[VQ_RX]; |
struct virtqueue *vq = rxq->rxq_vq; |
int i; |
int i; |
|
|
for (i = 0; i < vq->vq_num; i++) { |
for (i = 0; i < vq->vq_num; i++) { |
if (sc->sc_rx_mbufs[i] == NULL) |
if (rxq->rxq_mbufs[i] == NULL) |
continue; |
continue; |
vioif_free_rx_mbuf(sc, i); |
vioif_free_rx_mbuf(rxq, i); |
} |
} |
} |
} |
|
|
Line 1137 vioif_tx_vq_done(struct virtqueue *vq) |
|
Line 1559 vioif_tx_vq_done(struct virtqueue *vq) |
|
struct virtio_softc *vsc = vq->vq_owner; |
struct virtio_softc *vsc = vq->vq_owner; |
struct vioif_softc *sc = device_private(virtio_child(vsc)); |
struct vioif_softc *sc = device_private(virtio_child(vsc)); |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
|
struct vioif_txqueue *txq = vq->vq_done_ctx; |
int r = 0; |
int r = 0; |
|
|
VIOIF_TX_LOCK(sc); |
mutex_enter(txq->txq_lock); |
|
|
if (sc->sc_stopping) |
if (txq->txq_stopping) |
goto out; |
goto out; |
|
|
r = vioif_tx_vq_done_locked(vq); |
r = vioif_tx_vq_done_locked(vq); |
|
|
out: |
out: |
VIOIF_TX_UNLOCK(sc); |
mutex_exit(txq->txq_lock); |
if (r) |
if (r) { |
if_schedule_deferred_start(ifp); |
if_schedule_deferred_start(ifp); |
|
|
|
KASSERT(txq->txq_deferred_transmit != NULL); |
|
softint_schedule(txq->txq_deferred_transmit); |
|
} |
return r; |
return r; |
} |
} |
|
|
Line 1158 vioif_tx_vq_done_locked(struct virtqueue |
|
Line 1585 vioif_tx_vq_done_locked(struct virtqueue |
|
{ |
{ |
struct virtio_softc *vsc = vq->vq_owner; |
struct virtio_softc *vsc = vq->vq_owner; |
struct vioif_softc *sc = device_private(virtio_child(vsc)); |
struct vioif_softc *sc = device_private(virtio_child(vsc)); |
|
struct vioif_txqueue *txq = vq->vq_done_ctx; |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
struct mbuf *m; |
struct mbuf *m; |
int r = 0; |
int r = 0; |
int slot, len; |
int slot, len; |
|
|
KASSERT(VIOIF_TX_LOCKED(sc)); |
KASSERT(mutex_owned(txq->txq_lock)); |
|
|
while (virtio_dequeue(vsc, vq, &slot, &len) == 0) { |
while (virtio_dequeue(vsc, vq, &slot, &len) == 0) { |
r++; |
r++; |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_txhdr_dmamaps[slot], |
bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot], |
0, sizeof(struct virtio_net_hdr), |
0, sizeof(struct virtio_net_hdr), |
BUS_DMASYNC_POSTWRITE); |
BUS_DMASYNC_POSTWRITE); |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot], |
bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot], |
0, sc->sc_tx_dmamaps[slot]->dm_mapsize, |
0, txq->txq_dmamaps[slot]->dm_mapsize, |
BUS_DMASYNC_POSTWRITE); |
BUS_DMASYNC_POSTWRITE); |
m = sc->sc_tx_mbufs[slot]; |
m = txq->txq_mbufs[slot]; |
bus_dmamap_unload(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot]); |
bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[slot]); |
sc->sc_tx_mbufs[slot] = 0; |
txq->txq_mbufs[slot] = NULL; |
virtio_dequeue_commit(vsc, vq, slot); |
virtio_dequeue_commit(vsc, vq, slot); |
ifp->if_opackets++; |
ifp->if_opackets++; |
m_freem(m); |
m_freem(m); |
Line 1188 vioif_tx_vq_done_locked(struct virtqueue |
|
Line 1616 vioif_tx_vq_done_locked(struct virtqueue |
|
|
|
/* free all the mbufs already put on vq; called from if_stop(disable) */ |
/* free all the mbufs already put on vq; called from if_stop(disable) */ |
static void |
static void |
vioif_tx_drain(struct vioif_softc *sc) |
vioif_tx_drain(struct vioif_txqueue *txq) |
{ |
{ |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtqueue *vq = txq->txq_vq; |
struct virtqueue *vq = &sc->sc_vq[VQ_TX]; |
struct virtio_softc *vsc = vq->vq_owner; |
int i; |
int i; |
|
|
KASSERT(sc->sc_stopping); |
KASSERT(txq->txq_stopping); |
|
|
for (i = 0; i < vq->vq_num; i++) { |
for (i = 0; i < vq->vq_num; i++) { |
if (sc->sc_tx_mbufs[i] == NULL) |
if (txq->txq_mbufs[i] == NULL) |
continue; |
continue; |
bus_dmamap_unload(virtio_dmat(vsc), sc->sc_tx_dmamaps[i]); |
bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[i]); |
m_freem(sc->sc_tx_mbufs[i]); |
m_freem(txq->txq_mbufs[i]); |
sc->sc_tx_mbufs[i] = NULL; |
txq->txq_mbufs[i] = NULL; |
} |
} |
} |
} |
|
|
Line 1209 vioif_tx_drain(struct vioif_softc *sc) |
|
Line 1637 vioif_tx_drain(struct vioif_softc *sc) |
|
* Control vq |
* Control vq |
*/ |
*/ |
/* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */ |
/* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */ |
|
static void |
|
vioif_ctrl_acquire(struct vioif_softc *sc) |
|
{ |
|
struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; |
|
|
|
mutex_enter(&ctrlq->ctrlq_wait_lock); |
|
while (ctrlq->ctrlq_inuse != FREE) |
|
cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock); |
|
ctrlq->ctrlq_inuse = INUSE; |
|
ctrlq->ctrlq_owner = curlwp; |
|
mutex_exit(&ctrlq->ctrlq_wait_lock); |
|
} |
|
|
|
static void |
|
vioif_ctrl_release(struct vioif_softc *sc) |
|
{ |
|
struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; |
|
|
|
KASSERT(ctrlq->ctrlq_inuse != FREE); |
|
KASSERT(ctrlq->ctrlq_owner == curlwp); |
|
|
|
mutex_enter(&ctrlq->ctrlq_wait_lock); |
|
ctrlq->ctrlq_inuse = FREE; |
|
ctrlq->ctrlq_owner = NULL; |
|
cv_signal(&ctrlq->ctrlq_wait); |
|
mutex_exit(&ctrlq->ctrlq_wait_lock); |
|
} |
|
|
static int |
static int |
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff) |
vioif_ctrl_load_cmdspec(struct vioif_softc *sc, |
|
struct vioif_ctrl_cmdspec *specs, int nspecs) |
{ |
{ |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtqueue *vq = &sc->sc_vq[VQ_CTRL]; |
int i, r, loaded; |
int r, slot; |
|
|
|
if (!sc->sc_has_ctrl) |
loaded = 0; |
return ENOTSUP; |
for (i = 0; i < nspecs; i++) { |
|
r = bus_dmamap_load(virtio_dmat(vsc), |
|
specs[i].dmamap, specs[i].buf, specs[i].bufsize, |
|
NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
|
if (r) { |
|
printf("%s: control command dmamap load failed, " |
|
"error code %d\n", device_xname(sc->sc_dev), r); |
|
goto err; |
|
} |
|
loaded++; |
|
|
|
} |
|
|
|
return r; |
|
|
|
err: |
|
for (i = 0; i < loaded; i++) { |
|
bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap); |
|
} |
|
|
|
return r; |
|
} |
|
|
|
static void |
|
vioif_ctrl_unload_cmdspec(struct vioif_softc *sc, |
|
struct vioif_ctrl_cmdspec *specs, int nspecs) |
|
{ |
|
struct virtio_softc *vsc = sc->sc_virtio; |
|
int i; |
|
|
mutex_enter(&sc->sc_ctrl_wait_lock); |
for (i = 0; i < nspecs; i++) { |
while (sc->sc_ctrl_inuse != FREE) |
bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap); |
cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock); |
} |
sc->sc_ctrl_inuse = INUSE; |
} |
mutex_exit(&sc->sc_ctrl_wait_lock); |
|
|
|
sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_RX; |
|
sc->sc_ctrl_cmd->command = cmd; |
|
sc->sc_ctrl_rx->onoff = onoff; |
|
|
|
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, |
static int |
|
vioif_ctrl_send_command(struct vioif_softc *sc, uint8_t class, uint8_t cmd, |
|
struct vioif_ctrl_cmdspec *specs, int nspecs) |
|
{ |
|
struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; |
|
struct virtqueue *vq = ctrlq->ctrlq_vq; |
|
struct virtio_softc *vsc = sc->sc_virtio; |
|
int i, r, slot; |
|
|
|
ctrlq->ctrlq_cmd->class = class; |
|
ctrlq->ctrlq_cmd->command = cmd; |
|
|
|
bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap, |
0, sizeof(struct virtio_net_ctrl_cmd), |
0, sizeof(struct virtio_net_ctrl_cmd), |
BUS_DMASYNC_PREWRITE); |
BUS_DMASYNC_PREWRITE); |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_rx_dmamap, |
for (i = 0; i < nspecs; i++) { |
0, sizeof(struct virtio_net_ctrl_rx), |
bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap, |
BUS_DMASYNC_PREWRITE); |
0, specs[i].bufsize, |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, |
BUS_DMASYNC_PREWRITE); |
|
} |
|
bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap, |
0, sizeof(struct virtio_net_ctrl_status), |
0, sizeof(struct virtio_net_ctrl_status), |
BUS_DMASYNC_PREREAD); |
BUS_DMASYNC_PREREAD); |
|
|
r = virtio_enqueue_prep(vsc, vq, &slot); |
r = virtio_enqueue_prep(vsc, vq, &slot); |
if (r != 0) |
if (r != 0) |
panic("%s: control vq busy!?", device_xname(sc->sc_dev)); |
panic("%s: control vq busy!?", device_xname(sc->sc_dev)); |
r = virtio_enqueue_reserve(vsc, vq, slot, 3); |
r = virtio_enqueue_reserve(vsc, vq, slot, nspecs + 2); |
if (r != 0) |
if (r != 0) |
panic("%s: control vq busy!?", device_xname(sc->sc_dev)); |
panic("%s: control vq busy!?", device_xname(sc->sc_dev)); |
virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true); |
virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_cmd_dmamap, true); |
virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_rx_dmamap, true); |
for (i = 0; i < nspecs; i++) { |
virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false); |
virtio_enqueue(vsc, vq, slot, specs[i].dmamap, true); |
|
} |
|
virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_status_dmamap, false); |
virtio_enqueue_commit(vsc, vq, slot, true); |
virtio_enqueue_commit(vsc, vq, slot, true); |
|
|
/* wait for done */ |
/* wait for done */ |
mutex_enter(&sc->sc_ctrl_wait_lock); |
mutex_enter(&ctrlq->ctrlq_wait_lock); |
while (sc->sc_ctrl_inuse != DONE) |
while (ctrlq->ctrlq_inuse != DONE) |
cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock); |
cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock); |
mutex_exit(&sc->sc_ctrl_wait_lock); |
mutex_exit(&ctrlq->ctrlq_wait_lock); |
/* already dequeueued */ |
/* already dequeueued */ |
|
|
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, 0, |
bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap, 0, |
sizeof(struct virtio_net_ctrl_cmd), |
sizeof(struct virtio_net_ctrl_cmd), |
BUS_DMASYNC_POSTWRITE); |
BUS_DMASYNC_POSTWRITE); |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_rx_dmamap, 0, |
for (i = 0; i < nspecs; i++) { |
sizeof(struct virtio_net_ctrl_rx), |
bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap, 0, |
BUS_DMASYNC_POSTWRITE); |
specs[i].bufsize, |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, 0, |
BUS_DMASYNC_POSTWRITE); |
|
} |
|
bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap, 0, |
sizeof(struct virtio_net_ctrl_status), |
sizeof(struct virtio_net_ctrl_status), |
BUS_DMASYNC_POSTREAD); |
BUS_DMASYNC_POSTREAD); |
|
|
if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK) |
if (ctrlq->ctrlq_status->ack == VIRTIO_NET_OK) |
r = 0; |
r = 0; |
else { |
else { |
printf("%s: failed setting rx mode\n", |
printf("%s: failed setting rx mode\n", |
Line 1275 vioif_ctrl_rx(struct vioif_softc *sc, in |
|
Line 1772 vioif_ctrl_rx(struct vioif_softc *sc, in |
|
r = EIO; |
r = EIO; |
} |
} |
|
|
mutex_enter(&sc->sc_ctrl_wait_lock); |
return r; |
sc->sc_ctrl_inuse = FREE; |
} |
cv_signal(&sc->sc_ctrl_wait); |
|
mutex_exit(&sc->sc_ctrl_wait_lock); |
static int |
|
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff) |
|
{ |
|
struct virtio_net_ctrl_rx *rx = sc->sc_ctrlq.ctrlq_rx; |
|
struct vioif_ctrl_cmdspec specs[1]; |
|
int r; |
|
|
|
if (!sc->sc_has_ctrl) |
|
return ENOTSUP; |
|
|
|
vioif_ctrl_acquire(sc); |
|
|
|
rx->onoff = onoff; |
|
specs[0].dmamap = sc->sc_ctrlq.ctrlq_rx_dmamap; |
|
specs[0].buf = rx; |
|
specs[0].bufsize = sizeof(*rx); |
|
|
|
r = vioif_ctrl_send_command(sc, VIRTIO_NET_CTRL_RX, cmd, |
|
specs, __arraycount(specs)); |
|
|
|
vioif_ctrl_release(sc); |
return r; |
return r; |
} |
} |
|
|
Line 1307 vioif_set_allmulti(struct vioif_softc *s |
|
Line 1823 vioif_set_allmulti(struct vioif_softc *s |
|
static int |
static int |
vioif_set_rx_filter(struct vioif_softc *sc) |
vioif_set_rx_filter(struct vioif_softc *sc) |
{ |
{ |
/* filter already set in sc_ctrl_mac_tbl */ |
/* filter already set in ctrlq->ctrlq_mac_tbl */ |
struct virtio_softc *vsc = sc->sc_virtio; |
struct virtio_net_ctrl_mac_tbl *mac_tbl_uc, *mac_tbl_mc; |
struct virtqueue *vq = &sc->sc_vq[VQ_CTRL]; |
struct vioif_ctrl_cmdspec specs[2]; |
int r, slot; |
int nspecs = __arraycount(specs); |
|
int r; |
|
|
|
mac_tbl_uc = sc->sc_ctrlq.ctrlq_mac_tbl_uc; |
|
mac_tbl_mc = sc->sc_ctrlq.ctrlq_mac_tbl_mc; |
|
|
if (!sc->sc_has_ctrl) |
if (!sc->sc_has_ctrl) |
return ENOTSUP; |
return ENOTSUP; |
|
|
mutex_enter(&sc->sc_ctrl_wait_lock); |
vioif_ctrl_acquire(sc); |
while (sc->sc_ctrl_inuse != FREE) |
|
cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock); |
|
sc->sc_ctrl_inuse = INUSE; |
|
mutex_exit(&sc->sc_ctrl_wait_lock); |
|
|
|
sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC; |
|
sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET; |
|
|
|
r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap, |
|
sc->sc_ctrl_mac_tbl_uc, |
|
(sizeof(struct virtio_net_ctrl_mac_tbl) |
|
+ ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries), |
|
NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
|
if (r) { |
|
printf("%s: control command dmamap load failed, " |
|
"error code %d\n", device_xname(sc->sc_dev), r); |
|
goto out; |
|
} |
|
r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap, |
|
sc->sc_ctrl_mac_tbl_mc, |
|
(sizeof(struct virtio_net_ctrl_mac_tbl) |
|
+ ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries), |
|
NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
|
if (r) { |
|
printf("%s: control command dmamap load failed, " |
|
"error code %d\n", device_xname(sc->sc_dev), r); |
|
bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap); |
|
goto out; |
|
} |
|
|
|
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, |
specs[0].dmamap = sc->sc_ctrlq.ctrlq_tbl_uc_dmamap; |
0, sizeof(struct virtio_net_ctrl_cmd), |
specs[0].buf = mac_tbl_uc; |
BUS_DMASYNC_PREWRITE); |
specs[0].bufsize = sizeof(*mac_tbl_uc) |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap, 0, |
+ (ETHER_ADDR_LEN * mac_tbl_uc->nentries); |
(sizeof(struct virtio_net_ctrl_mac_tbl) |
|
+ ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries), |
specs[1].dmamap = sc->sc_ctrlq.ctrlq_tbl_mc_dmamap; |
BUS_DMASYNC_PREWRITE); |
specs[1].buf = mac_tbl_mc; |
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap, 0, |
specs[1].bufsize = sizeof(*mac_tbl_mc) |
(sizeof(struct virtio_net_ctrl_mac_tbl) |
+ (ETHER_ADDR_LEN * mac_tbl_mc->nentries); |
+ ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries), |
|
BUS_DMASYNC_PREWRITE); |
|
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, |
|
0, sizeof(struct virtio_net_ctrl_status), |
|
BUS_DMASYNC_PREREAD); |
|
|
|
r = virtio_enqueue_prep(vsc, vq, &slot); |
r = vioif_ctrl_load_cmdspec(sc, specs, nspecs); |
if (r != 0) |
|
panic("%s: control vq busy!?", device_xname(sc->sc_dev)); |
|
r = virtio_enqueue_reserve(vsc, vq, slot, 4); |
|
if (r != 0) |
if (r != 0) |
panic("%s: control vq busy!?", device_xname(sc->sc_dev)); |
goto out; |
virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true); |
|
virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_uc_dmamap, true); |
|
virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_mc_dmamap, true); |
|
virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false); |
|
virtio_enqueue_commit(vsc, vq, slot, true); |
|
|
|
/* wait for done */ |
r = vioif_ctrl_send_command(sc, |
mutex_enter(&sc->sc_ctrl_wait_lock); |
VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, |
while (sc->sc_ctrl_inuse != DONE) |
specs, nspecs); |
cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock); |
|
mutex_exit(&sc->sc_ctrl_wait_lock); |
|
/* already dequeueued */ |
|
|
|
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, 0, |
vioif_ctrl_unload_cmdspec(sc, specs, nspecs); |
sizeof(struct virtio_net_ctrl_cmd), |
|
BUS_DMASYNC_POSTWRITE); |
|
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap, 0, |
|
(sizeof(struct virtio_net_ctrl_mac_tbl) |
|
+ ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries), |
|
BUS_DMASYNC_POSTWRITE); |
|
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap, 0, |
|
(sizeof(struct virtio_net_ctrl_mac_tbl) |
|
+ ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries), |
|
BUS_DMASYNC_POSTWRITE); |
|
bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, 0, |
|
sizeof(struct virtio_net_ctrl_status), |
|
BUS_DMASYNC_POSTREAD); |
|
bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap); |
|
bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap); |
|
|
|
if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK) |
|
r = 0; |
|
else { |
|
printf("%s: failed setting rx filter\n", |
|
device_xname(sc->sc_dev)); |
|
r = EIO; |
|
} |
|
|
|
out: |
out: |
mutex_enter(&sc->sc_ctrl_wait_lock); |
vioif_ctrl_release(sc); |
sc->sc_ctrl_inuse = FREE; |
|
cv_signal(&sc->sc_ctrl_wait); |
return r; |
mutex_exit(&sc->sc_ctrl_wait_lock); |
} |
|
|
|
static int |
|
vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *sc, int nvq_pairs) |
|
{ |
|
struct virtio_net_ctrl_mq *mq = sc->sc_ctrlq.ctrlq_mq; |
|
struct vioif_ctrl_cmdspec specs[1]; |
|
int r; |
|
|
|
if (!sc->sc_has_ctrl) |
|
return ENOTSUP; |
|
|
|
if (nvq_pairs <= 1) |
|
return EINVAL; |
|
|
|
vioif_ctrl_acquire(sc); |
|
|
|
mq->virtqueue_pairs = nvq_pairs; |
|
specs[0].dmamap = sc->sc_ctrlq.ctrlq_mq_dmamap; |
|
specs[0].buf = mq; |
|
specs[0].bufsize = sizeof(*mq); |
|
|
|
r = vioif_ctrl_send_command(sc, |
|
VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, |
|
specs, __arraycount(specs)); |
|
|
|
vioif_ctrl_release(sc); |
|
|
return r; |
return r; |
} |
} |
|
|
vioif_ctrl_vq_done(struct virtqueue *vq) |
vioif_ctrl_vq_done(struct virtqueue *vq) |
{ |
{ |
struct virtio_softc *vsc = vq->vq_owner; |
struct virtio_softc *vsc = vq->vq_owner; |
struct vioif_softc *sc = device_private(virtio_child(vsc)); |
struct vioif_ctrlqueue *ctrlq = vq->vq_done_ctx; |
int r, slot; |
int r, slot; |
|
|
r = virtio_dequeue(vsc, vq, &slot, NULL); |
r = virtio_dequeue(vsc, vq, &slot, NULL); |
Line 1427 vioif_ctrl_vq_done(struct virtqueue *vq) |
|
Line 1905 vioif_ctrl_vq_done(struct virtqueue *vq) |
|
return 0; |
return 0; |
virtio_dequeue_commit(vsc, vq, slot); |
virtio_dequeue_commit(vsc, vq, slot); |
|
|
mutex_enter(&sc->sc_ctrl_wait_lock); |
mutex_enter(&ctrlq->ctrlq_wait_lock); |
sc->sc_ctrl_inuse = DONE; |
ctrlq->ctrlq_inuse = DONE; |
cv_signal(&sc->sc_ctrl_wait); |
cv_signal(&ctrlq->ctrlq_wait); |
mutex_exit(&sc->sc_ctrl_wait_lock); |
mutex_exit(&ctrlq->ctrlq_wait_lock); |
|
|
return 1; |
return 1; |
} |
} |
Line 1450 vioif_rx_filter(struct vioif_softc *sc) |
|
Line 1928 vioif_rx_filter(struct vioif_softc *sc) |
|
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
struct ether_multi *enm; |
struct ether_multi *enm; |
struct ether_multistep step; |
struct ether_multistep step; |
|
struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; |
int nentries; |
int nentries; |
int promisc = 0, allmulti = 0, rxfilter = 0; |
int promisc = 0, allmulti = 0, rxfilter = 0; |
int r; |
int r; |
Line 1477 vioif_rx_filter(struct vioif_softc *sc) |
|
Line 1956 vioif_rx_filter(struct vioif_softc *sc) |
|
allmulti = 1; |
allmulti = 1; |
goto set_unlock; |
goto set_unlock; |
} |
} |
memcpy(sc->sc_ctrl_mac_tbl_mc->macs[nentries], |
memcpy(ctrlq->ctrlq_mac_tbl_mc->macs[nentries], |
enm->enm_addrlo, ETHER_ADDR_LEN); |
enm->enm_addrlo, ETHER_ADDR_LEN); |
ETHER_NEXT_MULTI(step, enm); |
ETHER_NEXT_MULTI(step, enm); |
} |
} |
|
|
|
|
set: |
set: |
if (rxfilter) { |
if (rxfilter) { |
sc->sc_ctrl_mac_tbl_uc->nentries = 0; |
ctrlq->ctrlq_mac_tbl_uc->nentries = 0; |
sc->sc_ctrl_mac_tbl_mc->nentries = nentries; |
ctrlq->ctrlq_mac_tbl_mc->nentries = nentries; |
r = vioif_set_rx_filter(sc); |
r = vioif_set_rx_filter(sc); |
if (r != 0) { |
if (r != 0) { |
rxfilter = 0; |
rxfilter = 0; |
|
|
} |
} |
} else { |
} else { |
/* remove rx filter */ |
/* remove rx filter */ |
sc->sc_ctrl_mac_tbl_uc->nentries = 0; |
ctrlq->ctrlq_mac_tbl_uc->nentries = 0; |
sc->sc_ctrl_mac_tbl_mc->nentries = 0; |
ctrlq->ctrlq_mac_tbl_mc->nentries = 0; |
r = vioif_set_rx_filter(sc); |
r = vioif_set_rx_filter(sc); |
/* what to do on failure? */ |
/* what to do on failure? */ |
} |
} |
|
|
vioif_update_link_status(struct vioif_softc *sc) |
vioif_update_link_status(struct vioif_softc *sc) |
{ |
{ |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
|
struct vioif_txqueue *txq; |
bool active, changed; |
bool active, changed; |
int link; |
int link, i; |
|
|
active = vioif_is_link_up(sc); |
active = vioif_is_link_up(sc); |
changed = false; |
changed = false; |
|
|
VIOIF_TX_LOCK(sc); |
|
if (active) { |
if (active) { |
if (!sc->sc_link_active) |
if (!sc->sc_link_active) |
changed = true; |
changed = true; |
Line 1561 vioif_update_link_status(struct vioif_so |
|
Line 2040 vioif_update_link_status(struct vioif_so |
|
link = LINK_STATE_DOWN; |
link = LINK_STATE_DOWN; |
sc->sc_link_active = false; |
sc->sc_link_active = false; |
} |
} |
VIOIF_TX_UNLOCK(sc); |
|
|
|
if (changed) |
if (changed) { |
|
for (i = 0; i < sc->sc_act_nvq_pairs; i++) { |
|
txq = &sc->sc_txq[i]; |
|
|
|
mutex_enter(txq->txq_lock); |
|
txq->txq_link_active = sc->sc_link_active; |
|
mutex_exit(txq->txq_lock); |
|
} |
|
|
if_link_state_change(ifp, link); |
if_link_state_change(ifp, link); |
|
} |
} |
} |
|
|
static int |
static int |