[BACK]Return to virtio.c CVS log [TXT][DIR] Up to [cvs.NetBSD.org] / src / sys / dev / pci

Annotation of src/sys/dev/pci/virtio.c, Revision 1.56

1.56    ! riastrad    1: /*     $NetBSD: virtio.c,v 1.55 2022/06/18 22:11:01 andvar Exp $       */
1.1       hannken     2:
                      3: /*
1.43      reinoud     4:  * Copyright (c) 2020 The NetBSD Foundation, Inc.
                      5:  * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
1.1       hannken     6:  * Copyright (c) 2010 Minoura Makoto.
                      7:  * All rights reserved.
                      8:  *
                      9:  * Redistribution and use in source and binary forms, with or without
                     10:  * modification, are permitted provided that the following conditions
                     11:  * are met:
                     12:  * 1. Redistributions of source code must retain the above copyright
                     13:  *    notice, this list of conditions and the following disclaimer.
                     14:  * 2. Redistributions in binary form must reproduce the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer in the
                     16:  *    documentation and/or other materials provided with the distribution.
                     17:  *
                     18:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     19:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     20:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     21:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     22:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     23:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     24:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     25:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     26:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     27:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     28:  */
                     29:
                     30: #include <sys/cdefs.h>
1.56    ! riastrad   31: __KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.55 2022/06/18 22:11:01 andvar Exp $");
1.1       hannken    32:
                     33: #include <sys/param.h>
                     34: #include <sys/systm.h>
                     35: #include <sys/kernel.h>
                     36: #include <sys/atomic.h>
                     37: #include <sys/bus.h>
                     38: #include <sys/device.h>
                     39: #include <sys/kmem.h>
1.18      pgoyette   40: #include <sys/module.h>
1.1       hannken    41:
1.22      jdolecek   42: #define VIRTIO_PRIVATE
                     43:
1.29      cherry     44: #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
                     45: #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
1.1       hannken    46:
                     47: #define MINSEG_INDIRECT                2 /* use indirect if nsegs >= this value */
                     48:
/*
 * Map of virtio device-type IDs to human-readable names, indexed by the
 * device ID from the transport.  Incomplete list — IDs beyond 9 print as
 * unknown (see NDEVNAMES bound below).
 */
static const char *virtio_device_name[] = {
	"unknown (0)",			/*  0 */
	"network",			/*  1 */
	"block",			/*  2 */
	"console",			/*  3 */
	"entropy",			/*  4 */
	"memory balloon",		/*  5 */
	"I/O memory",			/*  6 */
	"remote processor messaging",	/*  7 */
	"SCSI",				/*  8 */
	"9P transport",			/*  9 */
};
/* Number of known device names; use to bounds-check before indexing. */
#define NDEVNAMES	__arraycount(virtio_device_name)
                     63:
1.1       hannken    64: static void    virtio_init_vq(struct virtio_softc *,
                     65:                    struct virtqueue *, const bool);
                     66:
/*
 * Write the device status register through the bus-specific backend
 * (sc_ops).  Status values are the VIRTIO_CONFIG_DEVICE_STATUS_* bits.
 */
void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}
                     72:
1.1       hannken    73: /*
                     74:  * Reset the device.
                     75:  */
                     76: /*
                     77:  * To reset the device to a known state, do following:
                     78:  *     virtio_reset(sc);            // this will stop the device activity
                     79:  *     <dequeue finished requests>; // virtio_dequeue() still can be called
                     80:  *     <revoke pending requests in the vqs if any>;
1.42      jakllsch   81:  *     virtio_reinit_start(sc);     // dequeue prohibited
1.1       hannken    82:  *     newfeatures = virtio_negotiate_features(sc, requestedfeatures);
                     83:  *     <some other initialization>;
                     84:  *     virtio_reinit_end(sc);       // device activated; enqueue allowed
                     85:  * Once attached, feature negotiation can only be allowed after virtio_reset.
                     86:  */
/*
 * Reset the device to a known, inactive state via the transport's
 * reset hook.  See the usage protocol in the comment above.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}
                     92:
1.53      yamaguch   93: int
1.1       hannken    94: virtio_reinit_start(struct virtio_softc *sc)
                     95: {
1.51      yamaguch   96:        int i, r;
1.1       hannken    97:
                     98:        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
                     99:        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
                    100:        for (i = 0; i < sc->sc_nvqs; i++) {
                    101:                int n;
                    102:                struct virtqueue *vq = &sc->sc_vqs[i];
1.31      jakllsch  103:                n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
1.1       hannken   104:                if (n == 0)     /* vq disappeared */
                    105:                        continue;
                    106:                if (n != vq->vq_num) {
                    107:                        panic("%s: virtqueue size changed, vq index %d\n",
                    108:                              device_xname(sc->sc_dev),
                    109:                              vq->vq_index);
                    110:                }
                    111:                virtio_init_vq(sc, vq, true);
1.31      jakllsch  112:                sc->sc_ops->setup_queue(sc, vq->vq_index,
1.43      reinoud   113:                    vq->vq_dmamap->dm_segs[0].ds_addr);
1.11      ozaki-r   114:        }
1.51      yamaguch  115:
                    116:        r = sc->sc_ops->setup_interrupts(sc, 1);
1.53      yamaguch  117:        if (r != 0)
                    118:                goto fail;
                    119:
                    120:        return 0;
                    121:
                    122: fail:
                    123:        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
                    124:
                    125:        return 1;
1.1       hannken   126: }
                    127:
/*
 * Finish re-initialization: set DRIVER_OK so the device resumes
 * processing; enqueueing is allowed again after this.
 */
void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}
                    133:
                    134: /*
                    135:  * Feature negotiation.
                    136:  */
1.43      reinoud   137: void
                    138: virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
1.1       hannken   139: {
                    140:        if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
                    141:            !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
                    142:                guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
1.43      reinoud   143:        sc->sc_ops->neg_features(sc, guest_features);
                    144:        if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
1.1       hannken   145:                sc->sc_indirect = true;
                    146:        else
                    147:                sc->sc_indirect = false;
1.43      reinoud   148: }
1.1       hannken   149:
                    150:
                    151: /*
1.43      reinoud   152:  * Device configuration registers readers/writers
1.1       hannken   153:  */
1.43      reinoud   154: #if 0
                    155: #define DPRINTFR(n, fmt, val, index, num) \
                    156:        printf("\n%s (", n); \
                    157:        for (int i = 0; i < num; i++) \
                    158:                printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
                    159:        printf(") -> "); printf(fmt, val); printf("\n");
1.45      reinoud   160: #define DPRINTFR2(n, fmt, val_s, val_n) \
                    161:        printf("%s ", n); \
                    162:        printf("\n        stream "); printf(fmt, val_s); printf(" norm "); printf(fmt, val_n); printf("\n");
1.43      reinoud   163: #else
                    164: #define DPRINTFR(n, fmt, val, index, num)
1.45      reinoud   165: #define DPRINTFR2(n, fmt, val_s, val_n)
1.43      reinoud   166: #endif
                    167:
1.45      reinoud   168:
1.1       hannken   169: uint8_t
1.43      reinoud   170: virtio_read_device_config_1(struct virtio_softc *sc, int index) {
1.45      reinoud   171:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    172:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
1.43      reinoud   173:        uint8_t val;
1.45      reinoud   174:
                    175:        val = bus_space_read_1(iot, ioh, index);
                    176:
1.43      reinoud   177:        DPRINTFR("read_1", "%02x", val, index, 1);
                    178:        return val;
                    179: }
                    180:
                    181: uint16_t
                    182: virtio_read_device_config_2(struct virtio_softc *sc, int index) {
1.45      reinoud   183:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    184:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
1.43      reinoud   185:        uint16_t val;
1.45      reinoud   186:
                    187:        val = bus_space_read_2(iot, ioh, index);
                    188:        if (BYTE_ORDER != sc->sc_bus_endian)
                    189:                val = bswap16(val);
                    190:
1.43      reinoud   191:        DPRINTFR("read_2", "%04x", val, index, 2);
1.45      reinoud   192:        DPRINTFR2("read_2", "%04x",
                    193:                bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
                    194:                bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
1.43      reinoud   195:        return val;
                    196: }
                    197:
                    198: uint32_t
                    199: virtio_read_device_config_4(struct virtio_softc *sc, int index) {
1.45      reinoud   200:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    201:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
1.43      reinoud   202:        uint32_t val;
1.45      reinoud   203:
                    204:        val = bus_space_read_4(iot, ioh, index);
                    205:        if (BYTE_ORDER != sc->sc_bus_endian)
                    206:                val = bswap32(val);
                    207:
1.43      reinoud   208:        DPRINTFR("read_4", "%08x", val, index, 4);
1.45      reinoud   209:        DPRINTFR2("read_4", "%08x",
                    210:                bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
                    211:                bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
1.43      reinoud   212:        return val;
                    213: }
                    214:
/*
 * The Virtio spec explicitly tells that reading and writing 8 bytes are not
 * considered atomic and no triggers may be connected to reading or writing
 * it. We access it using two 32-bit reads. See virtio spec 4.1.3.1.
 */
uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index) {
	bus_space_tag_t    iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;
	uint64_t val;

	/* Fetch both 32-bit halves; index holds the first, index+4 the second. */
	v.l[0] = bus_space_read_4(iot, ioh, index);
	v.l[1] = bus_space_read_4(iot, ioh, index + 4);
	/* Swap each half from bus to struct byte order. */
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}
	/* Reinterpret the pair as a 64-bit value via the union. */
	val = v.u64;

	/* Finally, convert the whole value from struct to host byte order. */
	if (BYTE_ORDER != sc->sc_struct_endian)
		val = bswap64(val);

	DPRINTFR("read_8", "%08lx", val, index, 8);
	DPRINTFR2("read_8 low ", "%08x",
		bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
		bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	DPRINTFR2("read_8 high ", "%08x",
		bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4),
		bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4));
	return val;
}
                    250:
1.43      reinoud   251: /*
                    252:  * In the older virtio spec, device config registers are host endian. On newer
1.45      reinoud   253:  * they are little endian. Some newer devices however explicitly specify their
1.55      andvar    254:  * register to always be little endian. These functions cater for these.
1.43      reinoud   255:  */
1.1       hannken   256: uint16_t
1.43      reinoud   257: virtio_read_device_config_le_2(struct virtio_softc *sc, int index) {
1.45      reinoud   258:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    259:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
1.43      reinoud   260:        uint16_t val;
                    261:
1.45      reinoud   262:        val = bus_space_read_2(iot, ioh, index);
                    263:        if (sc->sc_bus_endian != LITTLE_ENDIAN)
                    264:                val = bswap16(val);
                    265:
                    266:        DPRINTFR("read_le_2", "%04x", val, index, 2);
                    267:        DPRINTFR2("read_le_2", "%04x",
                    268:                bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
                    269:                bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
1.43      reinoud   270:        return val;
1.1       hannken   271: }
                    272:
                    273: uint32_t
1.43      reinoud   274: virtio_read_device_config_le_4(struct virtio_softc *sc, int index) {
1.45      reinoud   275:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    276:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
1.43      reinoud   277:        uint32_t val;
                    278:
1.45      reinoud   279:        val = bus_space_read_4(iot, ioh, index);
                    280:        if (sc->sc_bus_endian != LITTLE_ENDIAN)
                    281:                val = bswap32(val);
                    282:
1.43      reinoud   283:        DPRINTFR("read_le_4", "%08x", val, index, 4);
1.45      reinoud   284:        DPRINTFR2("read_le_4", "%08x",
                    285:                bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
                    286:                bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
1.43      reinoud   287:        return val;
                    288: }
                    289:
                    290: void
                    291: virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
1.1       hannken   292: {
1.45      reinoud   293:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    294:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
                    295:
                    296:        bus_space_write_1(iot, ioh, index, value);
1.1       hannken   297: }
                    298:
1.43      reinoud   299: void
                    300: virtio_write_device_config_2(struct virtio_softc *sc, int index, uint16_t value)
1.1       hannken   301: {
1.45      reinoud   302:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    303:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
                    304:
                    305:        if (BYTE_ORDER != sc->sc_bus_endian)
                    306:                value = bswap16(value);
                    307:        bus_space_write_2(iot, ioh, index, value);
1.1       hannken   308: }
                    309:
                    310: void
1.43      reinoud   311: virtio_write_device_config_4(struct virtio_softc *sc, int index, uint32_t value)
1.1       hannken   312: {
1.45      reinoud   313:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    314:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
                    315:
                    316:        if (BYTE_ORDER != sc->sc_bus_endian)
                    317:                value = bswap32(value);
                    318:        bus_space_write_4(iot, ioh, index, value);
1.1       hannken   319: }
                    320:
/*
 * The Virtio spec explicitly tells that reading and writing 8 bytes are not
 * considered atomic and no triggers may be connected to reading or writing
 * it. We access it using two 32 bit writes. For good measure it is stated to
 * always write lsb first just in case of a hypervisor bug. See virtio
 * spec 4.1.3.1.
 */
void
virtio_write_device_config_8(struct virtio_softc *sc, int index, uint64_t value)
{
	bus_space_tag_t    iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;

	/* host -> struct byte order for the 64-bit value */
	if (BYTE_ORDER != sc->sc_struct_endian)
		value = bswap64(value);

	/* split into halves, then swap each half from struct to bus order */
	v.u64 = value;
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}

	/* write the least-significant half first (see comment above) */
	if (sc->sc_struct_endian == LITTLE_ENDIAN) {
		bus_space_write_4(iot, ioh, index,     v.l[0]);
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
	} else {
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
		bus_space_write_4(iot, ioh, index,     v.l[0]);
	}
}
                    355:
1.43      reinoud   356: /*
                    357:  * In the older virtio spec, device config registers are host endian. On newer
1.45      reinoud   358:  * they are little endian. Some newer devices however explicitly specify their
1.55      andvar    359:  * register to always be little endian. These functions cater for these.
1.43      reinoud   360:  */
1.1       hannken   361: void
1.43      reinoud   362: virtio_write_device_config_le_2(struct virtio_softc *sc, int index, uint16_t value)
1.1       hannken   363: {
1.45      reinoud   364:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    365:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
                    366:
                    367:        if (sc->sc_bus_endian != LITTLE_ENDIAN)
                    368:                value = bswap16(value);
                    369:        bus_space_write_2(iot, ioh, index, value);
1.1       hannken   370: }
                    371:
                    372: void
1.43      reinoud   373: virtio_write_device_config_le_4(struct virtio_softc *sc, int index, uint32_t value)
                    374: {
1.45      reinoud   375:        bus_space_tag_t    iot = sc->sc_devcfg_iot;
                    376:        bus_space_handle_t ioh = sc->sc_devcfg_ioh;
                    377:
                    378:        if (sc->sc_bus_endian != LITTLE_ENDIAN)
                    379:                value = bswap32(value);
                    380:        bus_space_write_4(iot, ioh, index, value);
1.43      reinoud   381: }
                    382:
1.45      reinoud   383:
1.43      reinoud   384: /*
                    385:  * data structures endian helpers
                    386:  */
                    387: uint16_t virtio_rw16(struct virtio_softc *sc, uint16_t val)
1.1       hannken   388: {
1.43      reinoud   389:        KASSERT(sc);
1.45      reinoud   390:        return BYTE_ORDER != sc->sc_struct_endian ? bswap16(val) : val;
1.1       hannken   391: }
                    392:
1.43      reinoud   393: uint32_t virtio_rw32(struct virtio_softc *sc, uint32_t val)
                    394: {
                    395:        KASSERT(sc);
1.45      reinoud   396:        return BYTE_ORDER != sc->sc_struct_endian ? bswap32(val) : val;
1.43      reinoud   397: }
                    398:
                    399: uint64_t virtio_rw64(struct virtio_softc *sc, uint64_t val)
                    400: {
                    401:        KASSERT(sc);
1.45      reinoud   402:        return BYTE_ORDER != sc->sc_struct_endian ? bswap64(val) : val;
1.43      reinoud   403: }
                    404:
                    405:
1.1       hannken   406: /*
                    407:  * Interrupt handler.
                    408:  */
/*
 * Software-interrupt trampoline: forwards to the registered
 * sc_intrhand handler.  sc_intrhand must have been set before this
 * softint is scheduled.
 */
static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(*sc->sc_intrhand)(sc);
}
                    418:
1.1       hannken   419: /*
                    420:  * dmamap sync operations for a virtqueue.
                    421:  */
/* Sync the descriptor table, which occupies [0, vq_availoffset). */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
			ops);
}
                    429:
                    430: static inline void
                    431: vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
                    432: {
1.43      reinoud   433:        uint16_t hdrlen = offsetof(struct vring_avail, ring);
                    434:        if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
                    435:                hdrlen += sizeof(uint16_t);
                    436:
1.1       hannken   437:        bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
                    438:                        vq->vq_availoffset,
1.43      reinoud   439:                        hdrlen + sc->sc_nvqs * sizeof(uint16_t),
1.1       hannken   440:                        ops);
                    441: }
                    442:
                    443: static inline void
                    444: vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
                    445: {
1.43      reinoud   446:        uint16_t hdrlen = offsetof(struct vring_used, ring);
                    447:        if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
                    448:                hdrlen += sizeof(uint16_t);
                    449:
1.1       hannken   450:        bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
                    451:                        vq->vq_usedoffset,
1.43      reinoud   452:                        hdrlen + sc->sc_nvqs * sizeof(struct vring_used_elem),
1.1       hannken   453:                        ops);
                    454: }
                    455:
/* Sync the indirect descriptor table belonging to one slot. */
static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		     int ops)
{
	/* Each slot owns vq_maxnsegs indirect descriptors. */
	int offset = vq->vq_indirectoffset
		      + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
			offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
			ops);
}
                    467:
1.41      yamaguch  468: bool
                    469: virtio_vq_is_enqueued(struct virtio_softc *sc, struct virtqueue *vq)
1.37      yamaguch  470: {
                    471:
                    472:        if (vq->vq_queued) {
                    473:                vq->vq_queued = 0;
                    474:                vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
                    475:        }
                    476:        vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
                    477:        membar_consumer();
                    478:
1.43      reinoud   479:        return (vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx)) ? 1 : 0;
1.37      yamaguch  480: }
                    481:
1.56    ! riastrad  482: /*
        !           483:  * Scan vq, bus_dmamap_sync for the vqs (not for the payload),
        !           484:  * and calls (*vq_done)() if some entries are consumed.
        !           485:  *
        !           486:  * Can be used as sc_intrhand.
        !           487:  */
1.1       hannken   488: int
                    489: virtio_vq_intr(struct virtio_softc *sc)
                    490: {
                    491:        struct virtqueue *vq;
                    492:        int i, r = 0;
                    493:
                    494:        for (i = 0; i < sc->sc_nvqs; i++) {
                    495:                vq = &sc->sc_vqs[i];
1.41      yamaguch  496:                if (virtio_vq_is_enqueued(sc, vq) == 1) {
                    497:                        if (vq->vq_done)
1.54      uwe       498:                                r |= (*vq->vq_done)(vq);
1.41      yamaguch  499:                }
1.1       hannken   500:        }
                    501:
                    502:        return r;
                    503: }
                    504:
1.41      yamaguch  505: int
                    506: virtio_vq_intrhand(struct virtio_softc *sc)
1.37      yamaguch  507: {
1.41      yamaguch  508:        struct virtqueue *vq;
                    509:        int i, r = 0;
                    510:
                    511:        for (i = 0; i < sc->sc_nvqs; i++) {
                    512:                vq = &sc->sc_vqs[i];
1.54      uwe       513:                r |= (*vq->vq_intrhand)(vq->vq_intrhand_arg);
1.41      yamaguch  514:        }
1.37      yamaguch  515:
1.41      yamaguch  516:        return r;
1.37      yamaguch  517: }
                    518:
1.43      reinoud   519:
                    520: /*
                    521:  * Increase the event index in order to delay interrupts.
                    522:  */
                    523: int
                    524: virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
                    525:                uint16_t nslots)
                    526: {
                    527:        uint16_t        idx, nused;
                    528:
                    529:        idx = vq->vq_used_idx + nslots;
                    530:
                    531:        /* set the new event index: avail_ring->used_event = idx */
                    532:        *vq->vq_used_event = virtio_rw16(sc, idx);
                    533:        membar_producer();
                    534:
                    535:        vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
                    536:        vq->vq_queued++;
                    537:
                    538:        nused = (uint16_t)
                    539:                (virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
                    540:        KASSERT(nused <= vq->vq_num);
                    541:
                    542:        return nslots < nused;
                    543: }
                    544:
                    545: /*
                    546:  * Postpone interrupt until 3/4 of the available descriptors have been
                    547:  * consumed.
                    548:  */
                    549: int
                    550: virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
                    551: {
                    552:        uint16_t        nslots;
                    553:
                    554:        nslots = (uint16_t)
                    555:                (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;
                    556:
                    557:        return virtio_postpone_intr(sc, vq, nslots);
                    558: }
                    559:
                    560: /*
                    561:  * Postpone interrupt until all of the available descriptors have been
                    562:  * consumed.
                    563:  */
                    564: int
                    565: virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
                    566: {
                    567:        uint16_t        nslots;
                    568:
                    569:        nslots = (uint16_t)
                    570:                (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);
                    571:
                    572:        return virtio_postpone_intr(sc, vq, nslots);
                    573: }
                    574:
/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * No way to disable the interrupt completely with
		 * RingEventIdx. Instead advance used_event by half the
		 * possible value. This won't happen soon and is far enough in
		 * the past to not trigger a spurious interrupt.
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
	} else {
		/* Legacy path: hint the device via the avail ring flag. */
		vq->vq_avail->flags |= virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	/* Push the updated avail ring / used_event out to the device. */
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
                    595:
/*
 * Re-enable vq interrupts.  Returns nonzero if entries were consumed by
 * the device while interrupts were suppressed, so the caller can process
 * them instead of waiting for the next interrupt.
 */
int
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * If event index feature is negotiated, enabling interrupts
		 * is done through setting the latest consumed index in the
		 * used_event field
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
	} else {
		/* Legacy path: clear the NO_INTERRUPT hint. */
		vq->vq_avail->flags &= ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	/* Check for entries that raced with the re-enable above. */
	return vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx);
}
                    614:
/*
 * Initialize vq structure.  Called once at allocation time
 * (reinit == false) and again when reusing the queue after a device
 * reset (reinit == true); the mutexes are created only the first time.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	/* Clear the whole DMA area: desc table, avail/used rings, indirect. */
	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		/*
		 * Pre-link each slot's private indirect table so the
		 * 'next' fields never need to be rewritten later.
		 */
		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = virtio_rw16(sc, j + 1);
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
				    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	/* Make the cleared rings visible to the device before first use. */
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}
1.48      skrll     662:
1.1       hannken   663: /*
                    664:  * Allocate/free a vq.
                    665:  */
                    666: int
1.15      msaitoh   667: virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
                    668:     int maxsegsize, int maxnsegs, const char *name)
1.1       hannken   669: {
                    670:        int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
1.43      reinoud   671:        int rsegs, r, hdrlen;
1.1       hannken   672: #define VIRTQUEUE_ALIGN(n)     (((n)+(VIRTIO_PAGE_SIZE-1))&    \
                    673:                                 ~(VIRTIO_PAGE_SIZE-1))
                    674:
1.22      jdolecek  675:        /* Make sure callers allocate vqs in order */
                    676:        KASSERT(sc->sc_nvqs == index);
                    677:
1.1       hannken   678:        memset(vq, 0, sizeof(*vq));
                    679:
1.31      jakllsch  680:        vq_size = sc->sc_ops->read_queue_size(sc, index);
1.1       hannken   681:        if (vq_size == 0) {
                    682:                aprint_error_dev(sc->sc_dev,
                    683:                                 "virtqueue not exist, index %d for %s\n",
                    684:                                 index, name);
                    685:                goto err;
                    686:        }
1.43      reinoud   687:
                    688:        hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 3 : 2;
                    689:
1.1       hannken   690:        /* allocsize1: descriptor table + avail ring + pad */
                    691:        allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
1.43      reinoud   692:                             + sizeof(uint16_t)*(hdrlen + vq_size));
1.1       hannken   693:        /* allocsize2: used ring + pad */
1.43      reinoud   694:        allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
                    695:                             + sizeof(struct vring_used_elem)*vq_size);
1.1       hannken   696:        /* allocsize3: indirect table */
                    697:        if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
                    698:                allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
                    699:        else
                    700:                allocsize3 = 0;
                    701:        allocsize = allocsize1 + allocsize2 + allocsize3;
                    702:
                    703:        /* alloc and map the memory */
                    704:        r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
1.49      skrll     705:                             &vq->vq_segs[0], 1, &rsegs, BUS_DMA_WAITOK);
1.1       hannken   706:        if (r != 0) {
                    707:                aprint_error_dev(sc->sc_dev,
                    708:                                 "virtqueue %d for %s allocation failed, "
                    709:                                 "error code %d\n", index, name, r);
                    710:                goto err;
                    711:        }
1.43      reinoud   712:        r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize,
1.49      skrll     713:                           &vq->vq_vaddr, BUS_DMA_WAITOK);
1.1       hannken   714:        if (r != 0) {
                    715:                aprint_error_dev(sc->sc_dev,
                    716:                                 "virtqueue %d for %s map failed, "
                    717:                                 "error code %d\n", index, name, r);
                    718:                goto err;
                    719:        }
                    720:        r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
1.49      skrll     721:                              BUS_DMA_WAITOK, &vq->vq_dmamap);
1.1       hannken   722:        if (r != 0) {
                    723:                aprint_error_dev(sc->sc_dev,
                    724:                                 "virtqueue %d for %s dmamap creation failed, "
                    725:                                 "error code %d\n", index, name, r);
                    726:                goto err;
                    727:        }
                    728:        r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
1.49      skrll     729:                            vq->vq_vaddr, allocsize, NULL, BUS_DMA_WAITOK);
1.1       hannken   730:        if (r != 0) {
                    731:                aprint_error_dev(sc->sc_dev,
                    732:                                 "virtqueue %d for %s dmamap load failed, "
                    733:                                 "error code %d\n", index, name, r);
                    734:                goto err;
                    735:        }
                    736:
                    737:        /* remember addresses and offsets for later use */
                    738:        vq->vq_owner = sc;
                    739:        vq->vq_num = vq_size;
                    740:        vq->vq_index = index;
                    741:        vq->vq_desc = vq->vq_vaddr;
                    742:        vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
                    743:        vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
1.43      reinoud   744:        vq->vq_used_event = (uint16_t *) ((char *)vq->vq_avail +
                    745:                 offsetof(struct vring_avail, ring[vq->vq_num]));
1.1       hannken   746:        vq->vq_usedoffset = allocsize1;
                    747:        vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
1.43      reinoud   748:        vq->vq_avail_event = (uint16_t *)((char *)vq->vq_used +
                    749:                 offsetof(struct vring_used, ring[vq->vq_num]));
                    750:
1.1       hannken   751:        if (allocsize3 > 0) {
                    752:                vq->vq_indirectoffset = allocsize1 + allocsize2;
                    753:                vq->vq_indirect = (void*)(((char*)vq->vq_desc)
                    754:                                          + vq->vq_indirectoffset);
                    755:        }
                    756:        vq->vq_bytesize = allocsize;
                    757:        vq->vq_maxsegsize = maxsegsize;
                    758:        vq->vq_maxnsegs = maxnsegs;
                    759:
                    760:        /* free slot management */
                    761:        vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
1.38      chs       762:                                     KM_SLEEP);
1.1       hannken   763:        virtio_init_vq(sc, vq, false);
                    764:
1.43      reinoud   765:        /* set the vq address */
                    766:        sc->sc_ops->setup_queue(sc, index,
                    767:            vq->vq_dmamap->dm_segs[0].ds_addr);
                    768:
1.1       hannken   769:        aprint_verbose_dev(sc->sc_dev,
                    770:                           "allocated %u byte for virtqueue %d for %s, "
                    771:                           "size %d\n", allocsize, index, name, vq_size);
                    772:        if (allocsize3 > 0)
                    773:                aprint_verbose_dev(sc->sc_dev,
                    774:                                   "using %d byte (%d entries) "
                    775:                                   "indirect descriptors\n",
                    776:                                   allocsize3, maxnsegs * vq_size);
1.22      jdolecek  777:
                    778:        sc->sc_nvqs++;
                    779:
1.1       hannken   780:        return 0;
                    781:
                    782: err:
1.34      jakllsch  783:        sc->sc_ops->setup_queue(sc, index, 0);
1.1       hannken   784:        if (vq->vq_dmamap)
                    785:                bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
                    786:        if (vq->vq_vaddr)
                    787:                bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
                    788:        if (vq->vq_segs[0].ds_addr)
                    789:                bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
                    790:        memset(vq, 0, sizeof(*vq));
                    791:
                    792:        return -1;
                    793: }
                    794:
/*
 * Release all resources of a virtqueue.  The device must already be
 * deactivated and every slot returned to the free list; otherwise the
 * queue is still in flight and EBUSY is returned.
 */
int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		       device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

	/* Tear down in reverse order of virtio_alloc_vq(). */
	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}
                    829:
                    830: /*
                    831:  * Free descriptor management.
                    832:  */
                    833: static struct vq_entry *
                    834: vq_alloc_entry(struct virtqueue *vq)
                    835: {
                    836:        struct vq_entry *qe;
                    837:
                    838:        mutex_enter(&vq->vq_freelist_lock);
                    839:        if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
                    840:                mutex_exit(&vq->vq_freelist_lock);
                    841:                return NULL;
                    842:        }
                    843:        qe = SIMPLEQ_FIRST(&vq->vq_freelist);
                    844:        SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
                    845:        mutex_exit(&vq->vq_freelist_lock);
                    846:
                    847:        return qe;
                    848: }
                    849:
                    850: static void
                    851: vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
                    852: {
                    853:        mutex_enter(&vq->vq_freelist_lock);
                    854:        SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
                    855:        mutex_exit(&vq->vq_freelist_lock);
                    856:
                    857:        return;
                    858: }
                    859:
                    860: /*
                    861:  * Enqueue several dmamaps as a single request.
                    862:  */
                    863: /*
                    864:  * Typical usage:
 *  <queue size> instances of each of the following are stored in arrays
                    866:  *  - command blocks (in dmamem) should be pre-allocated and mapped
                    867:  *  - dmamaps for command blocks should be pre-allocated and loaded
                    868:  *  - dmamaps for payload should be pre-allocated
                    869:  *      r = virtio_enqueue_prep(sc, vq, &slot);                // allocate a slot
                    870:  *     if (r)          // currently 0 or EAGAIN
                    871:  *       return r;
                    872:  *     r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
                    873:  *     if (r) {
                    874:  *       virtio_enqueue_abort(sc, vq, slot);
                    875:  *       return r;
                    876:  *     }
1.48      skrll     877:  *     r = virtio_enqueue_reserve(sc, vq, slot,
1.1       hannken   878:  *                                dmamap_payload[slot]->dm_nsegs+1);
                    879:  *                                                     // ^ +1 for command
                    880:  *     if (r) {        // currently 0 or EAGAIN
                    881:  *       bus_dmamap_unload(dmat, dmamap_payload[slot]);
                    882:  *       return r;                                     // do not call abort()
                    883:  *     }
                    884:  *     <setup and prepare commands>
                    885:  *     bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
                    886:  *     bus_dmamap_sync(dmat, dmamap_payload[slot],...);
                    887:  *     virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
                    888:  *     virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
                    889:  *     virtio_enqueue_commit(sc, vq, slot, true);
                    890:  */
                    891:
                    892: /*
                    893:  * enqueue_prep: allocate a slot number
                    894:  */
                    895: int
                    896: virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
                    897: {
                    898:        struct vq_entry *qe1;
                    899:
                    900:        KASSERT(slotp != NULL);
                    901:
                    902:        qe1 = vq_alloc_entry(vq);
                    903:        if (qe1 == NULL)
                    904:                return EAGAIN;
                    905:        /* next slot is not allocated yet */
                    906:        qe1->qe_next = -1;
                    907:        *slotp = qe1->qe_index;
                    908:
                    909:        return 0;
                    910: }
                    911:
/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 * Chooses between an indirect table (one descriptor in the main ring
 * pointing at a per-slot side table) and a direct chain of main-ring
 * slots.  On EAGAIN all slots taken so far are rolled back.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
		       int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	/* Caller must have done enqueue_prep() and nothing else yet. */
	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		uint64_t addr;
		int i;

		/* Main-ring descriptor points at this slot's side table. */
		vd = &vq->vq_desc[qe1->qe_index];
		addr = vq->vq_dmamap->dm_segs[0].ds_addr
			+ vq->vq_indirectoffset;
		addr += sizeof(struct vring_desc)
			* vq->vq_maxnsegs * qe1->qe_index;
		vd->addr  = virtio_rw64(sc, addr);
		vd->len   = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
		vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		/* 'next' links were prebuilt in virtio_init_vq(). */
		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
		}
		vd[i].flags  = virtio_rw16(sc, 0);
		/* qe_next == 0: enqueue() starts at side-table entry 0. */
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		/* Allocate nsegs-1 more slots and link them to this one. */
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				/* Terminate chain so abort() can walk it. */
				vd[s].flags = virtio_rw16(sc, 0);
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
			vd[s].next  = virtio_rw16(sc, qe->qe_index);
			s = qe->qe_index;
		}
		vd[s].flags = virtio_rw16(sc, 0);

		return 0;
	}
}
                    983:
                    984: /*
                    985:  * enqueue: enqueue a single dmamap.
                    986:  */
                    987: int
                    988: virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
                    989:               bus_dmamap_t dmamap, bool write)
                    990: {
                    991:        struct vq_entry *qe1 = &vq->vq_entries[slot];
                    992:        struct vring_desc *vd = qe1->qe_desc_base;
                    993:        int i;
                    994:        int s = qe1->qe_next;
                    995:
                    996:        KASSERT(s >= 0);
                    997:        KASSERT(dmamap->dm_nsegs > 0);
                    998:
                    999:        for (i = 0; i < dmamap->dm_nsegs; i++) {
1.43      reinoud  1000:                vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
                   1001:                vd[s].len  = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
1.1       hannken  1002:                if (!write)
1.43      reinoud  1003:                        vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
                   1004:                s = virtio_rw16(sc, vd[s].next);
1.1       hannken  1005:        }
                   1006:        qe1->qe_next = s;
                   1007:
                   1008:        return 0;
                   1009: }
                   1010:
                   1011: int
                   1012: virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
                   1013:                 bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
                   1014:                 bool write)
                   1015: {
                   1016:        struct vq_entry *qe1 = &vq->vq_entries[slot];
                   1017:        struct vring_desc *vd = qe1->qe_desc_base;
                   1018:        int s = qe1->qe_next;
                   1019:
                   1020:        KASSERT(s >= 0);
                   1021:        KASSERT(dmamap->dm_nsegs == 1); /* XXX */
                   1022:        KASSERT((dmamap->dm_segs[0].ds_len > start) &&
                   1023:                (dmamap->dm_segs[0].ds_len >= start + len));
                   1024:
1.43      reinoud  1025:        vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
                   1026:        vd[s].len  = virtio_rw32(sc, len);
1.1       hannken  1027:        if (!write)
1.43      reinoud  1028:                vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
                   1029:        qe1->qe_next = virtio_rw16(sc, vd[s].next);
1.1       hannken  1030:
                   1031:        return 0;
                   1032: }
                   1033:
/*
 * enqueue_commit: add it to the aring.
 * With slot < 0 nothing is added; only the notification step runs
 * (lets a caller batch several slots and kick once).
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		      bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	/* Descriptors must reach memory before the avail ring points at them. */
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] =
		virtio_rw16(sc, slot);

notify:
	if (notifynow) {
		uint16_t o, n, t;
		uint16_t flags;
		/* o = avail idx the device has seen, n = new avail idx. */
		o = virtio_rw16(sc, vq->vq_avail->idx);
		n = vq->vq_avail_idx;

		/* publish avail idx */
		membar_producer();
		vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq->vq_queued++;

		membar_consumer();
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
			/*
			 * Kick only if the device's requested avail_event
			 * threshold lies within (o, n]; the modular
			 * comparison handles 16-bit index wraparound.
			 */
			t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
			if ((uint16_t) (n - t) < (uint16_t) (n - o))
				sc->sc_ops->kick(sc, vq->vq_index);
		} else {
			/* Legacy path: device suppresses kicks via flag. */
			flags = virtio_rw16(sc, vq->vq_used->flags);
			if (!(flags & VRING_USED_F_NO_NOTIFY))
				sc->sc_ops->kick(sc, vq->vq_index);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}
                   1086:
/*
 * enqueue_abort: rollback.
 * Frees the slot(s) reserved by enqueue_prep()/enqueue_reserve()
 * without handing anything to the device.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	/* qe_next < 0: only prep() was done, nothing chained yet. */
	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	/*
	 * Walk the direct descriptor chain built by enqueue_reserve(),
	 * reading each 'next' link before freeing its entry.
	 */
	s = slot;
	vd = &vq->vq_desc[0];
	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
		s = virtio_rw16(sc, vd[s].next);
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}
                   1112:
                   1113: /*
                   1114:  * Dequeue a request.
                   1115:  */
                   1116: /*
                   1117:  * dequeue: dequeue a request from uring; dmamap_sync for uring is
                   1118:  *         already done in the interrupt handler.
                   1119:  */
                   1120: int
                   1121: virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
                   1122:               int *slotp, int *lenp)
                   1123: {
                   1124:        uint16_t slot, usedidx;
                   1125:        struct vq_entry *qe;
                   1126:
1.43      reinoud  1127:        if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
1.1       hannken  1128:                return ENOENT;
                   1129:        mutex_enter(&vq->vq_uring_lock);
                   1130:        usedidx = vq->vq_used_idx++;
                   1131:        mutex_exit(&vq->vq_uring_lock);
                   1132:        usedidx %= vq->vq_num;
1.43      reinoud  1133:        slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);
1.1       hannken  1134:        qe = &vq->vq_entries[slot];
                   1135:
                   1136:        if (qe->qe_indirect)
                   1137:                vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);
                   1138:
                   1139:        if (slotp)
                   1140:                *slotp = slot;
                   1141:        if (lenp)
1.43      reinoud  1142:                *lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);
1.1       hannken  1143:
                   1144:        return 0;
                   1145: }
                   1146:
                   1147: /*
                   1148:  * dequeue_commit: complete dequeue; the slot is recycled for future use.
                   1149:  *                 if you forget to call this the slot will be leaked.
                   1150:  */
                   1151: int
                   1152: virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
                   1153: {
                   1154:        struct vq_entry *qe = &vq->vq_entries[slot];
                   1155:        struct vring_desc *vd = &vq->vq_desc[0];
                   1156:        int s = slot;
                   1157:
1.43      reinoud  1158:        while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
                   1159:                s = virtio_rw16(sc, vd[s].next);
1.1       hannken  1160:                vq_free_entry(vq, qe);
                   1161:                qe = &vq->vq_entries[s];
                   1162:        }
                   1163:        vq_free_entry(vq, qe);
                   1164:
                   1165:        return 0;
                   1166: }
1.18      pgoyette 1167:
1.22      jdolecek 1168: /*
                   1169:  * Attach a child, fill all the members.
                   1170:  */
                   1171: void
1.48      skrll    1172: virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
1.22      jdolecek 1173:                    struct virtqueue *vqs,
                   1174:                    virtio_callback config_change,
                   1175:                    virtio_callback intr_hand,
                   1176:                    int req_flags, int req_features, const char *feat_bits)
                   1177: {
1.43      reinoud  1178:        char buf[1024];
1.22      jdolecek 1179:
                   1180:        sc->sc_child = child;
                   1181:        sc->sc_ipl = ipl;
                   1182:        sc->sc_vqs = vqs;
                   1183:        sc->sc_config_change = config_change;
                   1184:        sc->sc_intrhand = intr_hand;
                   1185:        sc->sc_flags = req_flags;
                   1186:
1.43      reinoud  1187:        virtio_negotiate_features(sc, req_features);
                   1188:        snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
                   1189:        aprint_normal(": features: %s\n", buf);
1.22      jdolecek 1190:        aprint_naive("\n");
                   1191: }
                   1192:
1.37      yamaguch 1193: void
                   1194: virtio_child_attach_set_vqs(struct virtio_softc *sc,
                   1195:     struct virtqueue *vqs, int nvq_pairs)
                   1196: {
1.39      yamaguch 1197:
                   1198:        KASSERT(nvq_pairs == 1 ||
1.43      reinoud  1199:            (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) == 0);
1.37      yamaguch 1200:        if (nvq_pairs > 1)
                   1201:                sc->sc_child_mq = true;
                   1202:
                   1203:        sc->sc_vqs = vqs;
                   1204: }
                   1205:
1.22      jdolecek 1206: int
                   1207: virtio_child_attach_finish(struct virtio_softc *sc)
                   1208: {
                   1209:        int r;
                   1210:
1.44      reinoud  1211:        sc->sc_finished_called = true;
1.50      yamaguch 1212:        r = sc->sc_ops->alloc_interrupts(sc);
                   1213:        if (r != 0) {
                   1214:                aprint_error_dev(sc->sc_dev, "failed to allocate interrupts\n");
                   1215:                goto fail;
                   1216:        }
                   1217:
1.51      yamaguch 1218:        r = sc->sc_ops->setup_interrupts(sc, 0);
1.22      jdolecek 1219:        if (r != 0) {
                   1220:                aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
1.52      yamaguch 1221:                goto fail;
1.31      jakllsch 1222:        }
                   1223:
                   1224:        KASSERT(sc->sc_soft_ih == NULL);
1.43      reinoud  1225:        if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) {
1.48      skrll    1226:                u_int flags = SOFTINT_NET;
1.43      reinoud  1227:                if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
1.31      jakllsch 1228:                        flags |= SOFTINT_MPSAFE;
                   1229:
                   1230:                sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
                   1231:                if (sc->sc_soft_ih == NULL) {
                   1232:                        sc->sc_ops->free_interrupts(sc);
                   1233:                        aprint_error_dev(sc->sc_dev,
                   1234:                            "failed to establish soft interrupt\n");
                   1235:                        goto fail;
                   1236:                }
1.22      jdolecek 1237:        }
                   1238:
                   1239:        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
1.31      jakllsch 1240:        return 0;
1.22      jdolecek 1241:
1.31      jakllsch 1242: fail:
1.37      yamaguch 1243:        if (sc->sc_soft_ih) {
                   1244:                softint_disestablish(sc->sc_soft_ih);
                   1245:                sc->sc_soft_ih = NULL;
                   1246:        }
                   1247:
1.52      yamaguch 1248:        sc->sc_ops->free_interrupts(sc);
                   1249:
1.31      jakllsch 1250:        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
                   1251:        return 1;
1.22      jdolecek 1252: }
                   1253:
                   1254: void
                   1255: virtio_child_detach(struct virtio_softc *sc)
                   1256: {
                   1257:        sc->sc_child = NULL;
                   1258:        sc->sc_vqs = NULL;
                   1259:
                   1260:        virtio_device_reset(sc);
                   1261:
1.31      jakllsch 1262:        sc->sc_ops->free_interrupts(sc);
                   1263:
                   1264:        if (sc->sc_soft_ih) {
                   1265:                softint_disestablish(sc->sc_soft_ih);
                   1266:                sc->sc_soft_ih = NULL;
                   1267:        }
1.22      jdolecek 1268: }
                   1269:
                   1270: void
                   1271: virtio_child_attach_failed(struct virtio_softc *sc)
                   1272: {
                   1273:        virtio_child_detach(sc);
                   1274:
                   1275:        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
                   1276:
                   1277:        sc->sc_child = VIRTIO_CHILD_FAILED;
                   1278: }
                   1279:
                   1280: bus_dma_tag_t
                   1281: virtio_dmat(struct virtio_softc *sc)
                   1282: {
                   1283:        return sc->sc_dmat;
                   1284: }
                   1285:
                   1286: device_t
                   1287: virtio_child(struct virtio_softc *sc)
                   1288: {
                   1289:        return sc->sc_child;
                   1290: }
                   1291:
                   1292: int
                   1293: virtio_intrhand(struct virtio_softc *sc)
                   1294: {
1.54      uwe      1295:        return (*sc->sc_intrhand)(sc);
1.22      jdolecek 1296: }
                   1297:
1.43      reinoud  1298: uint64_t
1.22      jdolecek 1299: virtio_features(struct virtio_softc *sc)
                   1300: {
1.43      reinoud  1301:        return sc->sc_active_features;
1.22      jdolecek 1302: }
                   1303:
1.35      jakllsch 1304: int
1.43      reinoud  1305: virtio_attach_failed(struct virtio_softc *sc)
1.35      jakllsch 1306: {
1.43      reinoud  1307:        device_t self = sc->sc_dev;
1.35      jakllsch 1308:
1.43      reinoud  1309:        /* no error if its not connected, but its failed */
                   1310:        if (sc->sc_childdevid == 0)
                   1311:                return 1;
1.36      jmcneill 1312:
1.43      reinoud  1313:        if (sc->sc_child == NULL) {
                   1314:                aprint_error_dev(self,
                   1315:                        "no matching child driver; not configured\n");
                   1316:                return 1;
                   1317:        }
1.35      jakllsch 1318:
1.43      reinoud  1319:        if (sc->sc_child == VIRTIO_CHILD_FAILED) {
                   1320:                aprint_error_dev(self, "virtio configuration failed\n");
                   1321:                return 1;
                   1322:        }
1.44      reinoud  1323:
                   1324:        /* sanity check */
                   1325:        if (!sc->sc_finished_called) {
                   1326:                aprint_error_dev(self, "virtio internal error, child driver "
                   1327:                        "signaled OK but didn't initialize interrupts\n");
                   1328:                return 1;
                   1329:        }
                   1330:
1.43      reinoud  1331:        return 0;
                   1332: }
                   1333:
                   1334: void
                   1335: virtio_print_device_type(device_t self, int id, int revision)
                   1336: {
                   1337:        aprint_normal_dev(self, "%s device (rev. 0x%02x)\n",
                   1338:                  (id < NDEVNAMES ? virtio_device_name[id] : "Unknown"),
                   1339:                  revision);
1.35      jakllsch 1340: }
                   1341:
1.43      reinoud  1342:
MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
/* Generated autoconf glue (cfdriver/cfattach/cfdata tables used below). */
#include "ioconf.c"
#endif
1.48      skrll    1348:
1.18      pgoyette 1349: static int
                   1350: virtio_modcmd(modcmd_t cmd, void *opaque)
                   1351: {
                   1352:        int error = 0;
1.48      skrll    1353:
1.18      pgoyette 1354: #ifdef _MODULE
                   1355:        switch (cmd) {
                   1356:        case MODULE_CMD_INIT:
1.48      skrll    1357:                error = config_init_component(cfdriver_ioconf_virtio,
                   1358:                    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
1.18      pgoyette 1359:                break;
                   1360:        case MODULE_CMD_FINI:
1.48      skrll    1361:                error = config_fini_component(cfdriver_ioconf_virtio,
1.18      pgoyette 1362:                    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
                   1363:                break;
                   1364:        default:
                   1365:                error = ENOTTY;
                   1366:                break;
                   1367:        }
                   1368: #endif
1.48      skrll    1369:
                   1370:        return error;
1.18      pgoyette 1371: }

CVSweb <webmaster@jp.NetBSD.org>