version 1.42, 2020/09/17 17:09:59
version 1.42.2.1, 2021/04/03 22:28:49
/* $NetBSD$ */

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
Line 46 __KERNEL_RCSID(0, "$NetBSD$");

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

/* incomplete list */
static const char *virtio_device_name[] = {
	"unknown (0)",			/*  0 */
	"network",			/*  1 */
	"block",			/*  2 */
	"console",			/*  3 */
	"entropy",			/*  4 */
	"memory balloon",		/*  5 */
	"I/O memory",			/*  6 */
	"remote processor messaging",	/*  7 */
	"SCSI",				/*  8 */
	"9P transport",			/*  9 */
};
#define NDEVNAMES	__arraycount(virtio_device_name)
|
|
static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);
|
|
Line 110 virtio_reinit_start(struct virtio_softc ...
		}
		virtio_init_vq(sc, vq, true);
		sc->sc_ops->setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr);
	}
}
|
|
Line 123 virtio_reinit_end(struct virtio_softc ...

/*
 * Feature negotiation.
 */
void
virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
{
	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	sc->sc_ops->neg_features(sc, guest_features);
	if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;
}
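
/*
 * Illustrative sketch (not part of this file): a child driver passes the
 * feature bits it can use and later inspects what the host actually
 * accepted through virtio_features().  EXAMPLE_WANTED_FEATURES is a
 * hypothetical placeholder for a driver's feature mask.
 */
#if 0
static void
example_check_features(struct virtio_softc *vsc)
{
	uint64_t features;

	virtio_negotiate_features(vsc, EXAMPLE_WANTED_FEATURES);
	features = virtio_features(vsc);
	if ((features & VIRTIO_F_RING_INDIRECT_DESC) == 0)
		aprint_debug("host refused indirect descriptors\n");
}
#endif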
|
|
|
|
/*
 * Device configuration registers readers/writers
 */
#if 0
#define DPRINTFR(n, fmt, val, index, num) \
	printf("\n%s (", n); \
	for (int i = 0; i < num; i++) \
		printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
	printf(") -> "); printf(fmt, val); printf("\n");
#define DPRINTFR2(n, fmt, val_s, val_n) \
	printf("%s ", n); \
	printf("\n        stream "); printf(fmt, val_s); printf(" norm "); printf(fmt, val_n); printf("\n");
#else
#define DPRINTFR(n, fmt, val, index, num)
#define DPRINTFR2(n, fmt, val_s, val_n)
#endif
|
|
|
|
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index) {
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint8_t val;

	val = bus_space_read_1(iot, ioh, index);

	DPRINTFR("read_1", "%02x", val, index, 1);
	return val;
}
|
|
uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index) {
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint16_t val;

	val = bus_space_read_2(iot, ioh, index);
	if (BYTE_ORDER != sc->sc_bus_endian)
		val = bswap16(val);

	DPRINTFR("read_2", "%04x", val, index, 2);
	DPRINTFR2("read_2", "%04x",
	    bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	return val;
}
|
|
uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index) {
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint32_t val;

	val = bus_space_read_4(iot, ioh, index);
	if (BYTE_ORDER != sc->sc_bus_endian)
		val = bswap32(val);

	DPRINTFR("read_4", "%08x", val, index, 4);
	DPRINTFR2("read_4", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	return val;
}
|
|
|
/*
 * The virtio spec explicitly states that reading and writing 8 bytes is not
 * considered atomic and that no triggers may be connected to reading or
 * writing it. We access it using two 32 bit reads. See virtio spec 4.1.3.1.
 */
uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index) {
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;
	uint64_t val;

	v.l[0] = bus_space_read_4(iot, ioh, index);
	v.l[1] = bus_space_read_4(iot, ioh, index + 4);
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}
	val = v.u64;

	if (BYTE_ORDER != sc->sc_struct_endian)
		val = bswap64(val);

	DPRINTFR("read_8", "%08lx", val, index, 8);
	DPRINTFR2("read_8 low ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	DPRINTFR2("read_8 high ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4));
	return val;
}
|
|
|
/*
 * In the older virtio spec, device config registers are host endian. In newer
 * versions they are little endian. Some newer devices however explicitly
 * specify their registers to always be little endian. These functions cater
 * for these.
 */
uint16_t
virtio_read_device_config_le_2(struct virtio_softc *sc, int index) {
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint16_t val;

	val = bus_space_read_2(iot, ioh, index);
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap16(val);

	DPRINTFR("read_le_2", "%04x", val, index, 2);
	DPRINTFR2("read_le_2", "%04x",
	    bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}
|
|
|
uint32_t
virtio_read_device_config_le_4(struct virtio_softc *sc, int index) {
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint32_t val;

	val = bus_space_read_4(iot, ioh, index);
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap32(val);

	DPRINTFR("read_le_4", "%08x", val, index, 4);
	DPRINTFR2("read_le_4", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}
|
|
|
void
virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
{
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	bus_space_write_1(iot, ioh, index, value);
}
|
|
|
void
virtio_write_device_config_2(struct virtio_softc *sc, int index, uint16_t value)
{
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap16(value);
	bus_space_write_2(iot, ioh, index, value);
}
|
|
void
virtio_write_device_config_4(struct virtio_softc *sc, int index, uint32_t value)
{
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap32(value);
	bus_space_write_4(iot, ioh, index, value);
}
|
|
|
/*
 * The virtio spec explicitly states that reading and writing 8 bytes is not
 * considered atomic and that no triggers may be connected to reading or
 * writing it. We access it using two 32 bit writes. For good measure the spec
 * states to always write the LSB first, just in case of a hypervisor bug. See
 * virtio spec 4.1.3.1.
 */
void
virtio_write_device_config_8(struct virtio_softc *sc, int index, uint64_t value)
{
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;

	if (BYTE_ORDER != sc->sc_struct_endian)
		value = bswap64(value);

	v.u64 = value;
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}

	if (sc->sc_struct_endian == LITTLE_ENDIAN) {
		bus_space_write_4(iot, ioh, index,     v.l[0]);
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
	} else {
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
		bus_space_write_4(iot, ioh, index,     v.l[0]);
	}
}
|
|
|
/*
 * In the older virtio spec, device config registers are host endian. In newer
 * versions they are little endian. Some newer devices however explicitly
 * specify their registers to always be little endian. These functions cater
 * for these.
 */
void
virtio_write_device_config_le_2(struct virtio_softc *sc, int index, uint16_t value)
{
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		value = bswap16(value);
	bus_space_write_2(iot, ioh, index, value);
}
|
|
void
virtio_write_device_config_le_4(struct virtio_softc *sc, int index, uint32_t value)
{
	bus_space_tag_t	   iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		value = bswap32(value);
	bus_space_write_4(iot, ioh, index, value);
}
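
/*
 * Illustrative sketch (not part of this file): a child driver reads its
 * device-specific configuration space through the accessors above, which
 * hide the legacy (host endian) vs. 1.0 (little endian) difference.  The
 * offset macro EXAMPLE_CONFIG_CAPACITY is a hypothetical placeholder.
 */
#if 0
static void
example_read_config(device_t self, struct virtio_softc *vsc)
{
	uint64_t capacity;

	capacity = virtio_read_device_config_8(vsc, EXAMPLE_CONFIG_CAPACITY);
	aprint_normal_dev(self, "%" PRIu64 " sectors\n", capacity);
}
#endif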
|
|
|
|
|
/*
 * data structures endian helpers
 */
uint16_t virtio_rw16(struct virtio_softc *sc, uint16_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap16(val) : val;
}

uint32_t virtio_rw32(struct virtio_softc *sc, uint32_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap32(val) : val;
}

uint64_t virtio_rw64(struct virtio_softc *sc, uint64_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap64(val) : val;
}
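
/*
 * Illustrative sketch (not part of this file): any field shared with the
 * host (vring entries, request headers in DMA memory) must go through
 * virtio_rw16/32/64 in both directions.  struct example_req_hdr, its fields
 * and EXAMPLE_REQ_READ are hypothetical placeholders.
 */
#if 0
static void
example_fill_header(struct virtio_softc *vsc, struct example_req_hdr *hdr,
    uint64_t sector)
{
	/* hdr lives in DMA-visible memory shared with the host */
	hdr->type   = virtio_rw32(vsc, EXAMPLE_REQ_READ);
	hdr->sector = virtio_rw64(vsc, sector);
}
#endif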
|
|
|
|
/*
 * Interrupt handler.
 */
Line 419 vq_sync_descs(struct virtio_softc *sc, s ...
|
static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_avail, ring);
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		hdrlen += sizeof(uint16_t);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    hdrlen + sc->sc_nvqs * sizeof(uint16_t),
	    ops);
}
|
|
static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_used, ring);
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		hdrlen += sizeof(uint16_t);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    hdrlen + sc->sc_nvqs * sizeof(struct vring_used_elem),
	    ops);
}
|
|
Line 472 virtio_vq_is_enqueued(struct virtio_soft ...
	vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
	membar_consumer();

	return (vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx)) ? 1 : 0;
}
|
|
int
Line 506 virtio_vq_intrhand(struct virtio_softc ...
	return r;
}
|
|
|
|
|
/*
 * Increase the event index in order to delay interrupts.
 */
int
virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
    uint16_t nslots)
{
	uint16_t	idx, nused;

	idx = vq->vq_used_idx + nslots;

	/* set the new event index: avail_ring->used_event = idx */
	*vq->vq_used_event = virtio_rw16(sc, idx);
	membar_producer();

	vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	nused = (uint16_t)
	    (virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
	KASSERT(nused <= vq->vq_num);

	return nslots < nused;
}

/*
 * Postpone interrupt until 3/4 of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t	nslots;

	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;

	return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Postpone interrupt until all of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t	nslots;

	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);

	return virtio_postpone_intr(sc, vq, nslots);
}
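
/*
 * Illustrative sketch (not part of this file): a completion handler can use
 * virtio_postpone_intr_smart() to request the next interrupt only after a
 * good part of the ring has been consumed (requires the event index
 * feature).  example_vq_done() and example_complete() are hypothetical
 * driver-side names; the virtio_* calls follow this file's API.
 */
#if 0
static int
example_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	int slot, len, more = 0;

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		example_complete(vq, slot, len);
		virtio_dequeue_commit(vsc, vq, slot);
		more = 1;
	}
	if (more)
		virtio_postpone_intr_smart(vsc, vq);
	return more;
}
#endif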
|
|
/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * No way to disable the interrupt completely with
		 * RingEventIdx. Instead advance used_event by half the
		 * possible value. This won't happen soon and is far enough
		 * in the past to not trigger a spurious interrupt.
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
	} else {
		vq->vq_avail->flags |=
		    virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
|
|
int
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * If the event index feature is negotiated, enabling
		 * interrupts is done by setting the latest consumed index
		 * in the used_event field.
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
	} else {
		vq->vq_avail->flags &=
		    ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	return vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx);
}
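
/*
 * Illustrative sketch (not part of this file): a common pattern is to keep
 * the queue interrupt masked while draining completions and to re-enable it
 * only once the ring looks empty; the return value of virtio_start_vq_intr()
 * tells whether new entries raced in.  example_drain() is a hypothetical
 * helper that dequeues and commits everything currently pending.
 */
#if 0
static void
example_poll_until_empty(struct virtio_softc *vsc, struct virtqueue *vq)
{
	virtio_stop_vq_intr(vsc, vq);
	for (;;) {
		example_drain(vsc, vq);		/* dequeue + dequeue_commit */
		if (virtio_start_vq_intr(vsc, vq) == 0)
			break;			/* empty; interrupt re-armed */
		virtio_stop_vq_intr(vsc, vq);	/* more arrived; keep going */
	}
}
#endif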
|
|
Line 622 virtio_init_vq(struct virtio_softc ...
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = virtio_rw16(sc, j + 1);
			}
		}
	}
Line 649 virtio_init_vq(struct virtio_softc ...
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
Line 658 virtio_alloc_vq(struct virtio_softc ...
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r, hdrlen;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

Line 674 virtio_alloc_vq(struct virtio_softc ...
		    index, name);
		goto err;
	}

	hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 3 : 2;

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(hdrlen + vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
Line 692 virtio_alloc_vq(struct virtio_softc ...

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize,
	    &vq->vq_vaddr, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
Line 708 virtio_alloc_vq(struct virtio_softc ...
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_WAITOK, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
Line 716 virtio_alloc_vq(struct virtio_softc ...
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
Line 724 virtio_alloc_vq(struct virtio_softc ...
		goto err;
	}

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
Line 731 virtio_alloc_vq(struct virtio_softc ...
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_used_event = (uint16_t *) ((char *)vq->vq_avail +
	    offsetof(struct vring_avail, ring[vq->vq_num]));
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	vq->vq_avail_event = (uint16_t *)((char *)vq->vq_used +
	    offsetof(struct vring_used, ring[vq->vq_num]));

	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
Line 752 virtio_alloc_vq(struct virtio_softc ...
	    KM_SLEEP);
	virtio_init_vq(sc, vq, false);

	/* set the vq address */
	sc->sc_ops->setup_queue(sc, index,
	    vq->vq_dmamap->dm_segs[0].ds_addr);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u byte for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
Line 864 vq_free_entry(struct virtqueue *vq, stru ...
 *	virtio_enqueue_abort(sc, vq, slot);
 *	return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);
 *	// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
Line 922 virtio_enqueue_reserve(struct virtio_sof ...

	if (indirect) {
		struct vring_desc *vd;
		uint64_t addr;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->addr  = virtio_rw64(sc, addr);
		vd->len   = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
		vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
		}
		vd[i].flags = virtio_rw16(sc, 0);
		qe1->qe_next = 0;

		return 0;
Line 957 virtio_enqueue_reserve(struct virtio_sof ...
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = virtio_rw16(sc, 0);
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
			vd[s].next = virtio_rw16(sc, qe->qe_index);
			s = qe->qe_index;
		}
		vd[s].flags = virtio_rw16(sc, 0);

		return 0;
	}
Line 987 virtio_enqueue(struct virtio_softc ...
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
		vd[s].len  = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
		if (!write)
			vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
		s = virtio_rw16(sc, vd[s].next);
	}
	qe1->qe_next = s;
|
|
Line 1012 virtio_enqueue_p(struct virtio_softc ...
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
	vd[s].len  = virtio_rw32(sc, len);
	if (!write)
		vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
	qe1->qe_next = virtio_rw16(sc, vd[s].next);

	return 0;
}
Line 1039 virtio_enqueue_commit(struct virtio_soft ...
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] =
	    virtio_rw16(sc, slot);

notify:
	if (notifynow) {
		uint16_t o, n, t;
		uint16_t flags;
		o = virtio_rw16(sc, vq->vq_avail->idx);
		n = vq->vq_avail_idx;

		/* publish avail idx */
		membar_producer();
		vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq->vq_queued++;

		membar_consumer();
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
			t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
			if ((uint16_t) (n - t) < (uint16_t) (n - o))
				sc->sc_ops->kick(sc, vq->vq_index);
		} else {
			flags = virtio_rw16(sc, vq->vq_used->flags);
			if (!(flags & VRING_USED_F_NO_NOTIFY))
				sc->sc_ops->kick(sc, vq->vq_index);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	mutex_exit(&vq->vq_aring_lock);
|
|
Line 1091 virtio_enqueue_abort(struct virtio_softc ...

	s = slot;
	vd = &vq->vq_desc[0];
	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
		s = virtio_rw16(sc, vd[s].next);
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
Line 1114 virtio_dequeue(struct virtio_softc ...
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
Line 1129 virtio_dequeue(struct virtio_softc ...
	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);

	return 0;
}
Line 1145 virtio_dequeue_commit(struct virtio_soft ...
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
		s = virtio_rw16(sc, vd[s].next);
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
Line 1159 virtio_dequeue_commit(struct virtio_soft ...
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[1024];

	sc->sc_child = child;
	sc->sc_ipl = ipl;
Line 1174 virtio_child_attach_start(struct virtio_ ...
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
	aprint_normal(": features: %s\n", buf);
	aprint_naive("\n");
}
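
/*
 * Illustrative sketch (not part of this file): the attach path of a child
 * driver as seen from this API.  All example_* identifiers and the
 * EXAMPLE_F_WANTED / EXAMPLE_FLAG_BITS constants are hypothetical
 * placeholders; the virtio_* calls follow this file's API.
 */
#if 0
static void
example_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_softc *vsc = device_private(parent);
	struct example_softc *sc = device_private(self);

	virtio_child_attach_start(vsc, self, IPL_BIO, sc->sc_vq,
	    example_config_change, virtio_vq_intr, 0 /* req_flags */,
	    EXAMPLE_F_WANTED, EXAMPLE_FLAG_BITS);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAXPHYS,
	    MAXPHYS / NBPG + 2, "example requests") != 0)
		goto err;
	sc->sc_vq[0].vq_done = example_vq_done;
	virtio_child_attach_set_vqs(vsc, sc->sc_vq, 1);

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;
	return;
err:
	virtio_child_attach_failed(vsc);
}
#endif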
|
|
Line 867 virtio_child_attach_set_vqs(struct virti |
|
Line 1186 virtio_child_attach_set_vqs(struct virti |
|
{ |
{ |
|
|
KASSERT(nvq_pairs == 1 || |
KASSERT(nvq_pairs == 1 || |
(sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) == 0); |
(sc->sc_flags & VIRTIO_F_INTR_SOFTINT) == 0); |
if (nvq_pairs > 1) |
if (nvq_pairs > 1) |
sc->sc_child_mq = true; |
sc->sc_child_mq = true; |
|
|
Line 879 virtio_child_attach_finish(struct virtio |
|
Line 1198 virtio_child_attach_finish(struct virtio |
|
{ |
{ |
int r; |
int r; |
|
|
|
sc->sc_finished_called = true; |
r = sc->sc_ops->setup_interrupts(sc); |
r = sc->sc_ops->setup_interrupts(sc); |
if (r != 0) { |
if (r != 0) { |
aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n"); |
aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n"); |
Line 886 virtio_child_attach_finish(struct virtio |
|
Line 1206 virtio_child_attach_finish(struct virtio |
|
} |
} |
|
|
KASSERT(sc->sc_soft_ih == NULL); |
KASSERT(sc->sc_soft_ih == NULL); |
if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) { |
if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) { |
u_int flags = SOFTINT_NET; |
u_int flags = SOFTINT_NET; |
if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE) |
if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) |
flags |= SOFTINT_MPSAFE; |
flags |= SOFTINT_MPSAFE; |
|
|
sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc); |
sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc); |
Line 957 virtio_intrhand(struct virtio_softc *sc) |
|
Line 1277 virtio_intrhand(struct virtio_softc *sc) |
|
return (sc->sc_intrhand)(sc); |
return (sc->sc_intrhand)(sc); |
} |
} |
|
|
uint32_t |
uint64_t |
virtio_features(struct virtio_softc *sc) |
virtio_features(struct virtio_softc *sc) |
{ |
{ |
return sc->sc_features; |
return sc->sc_active_features; |
} |
} |
|
|
int
virtio_attach_failed(struct virtio_softc *sc)
{
	device_t self = sc->sc_dev;

	/* no error if it's not connected, but it has failed */
	if (sc->sc_childdevid == 0)
		return 1;

	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return 1;
	}

	if (sc->sc_child == VIRTIO_CHILD_FAILED) {
		aprint_error_dev(self, "virtio configuration failed\n");
		return 1;
	}

	/* sanity check */
	if (!sc->sc_finished_called) {
		aprint_error_dev(self, "virtio internal error, child driver "
		    "signaled OK but didn't initialize interrupts\n");
		return 1;
	}

	return 0;
}

void
virtio_print_device_type(device_t self, int id, int revision)
{
	aprint_normal_dev(self, "%s device (rev. 0x%02x)\n",
	    (id < NDEVNAMES ? virtio_device_name[id] : "Unknown"),
	    revision);
}
|
|
|
|
MODULE(MODULE_CLASS_DRIVER, virtio, NULL);
|
|
#ifdef _MODULE
#include "ioconf.c"
#endif
|
|
static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
Line 1348 virtio_modcmd(modcmd_t cmd, void *opaque ...
		break;
	}
#endif

	return error;
}