
Annotation of src/sys/dev/pci/ixgbe/ix_txrx.c, Revision 1.55

1.55    ! msaitoh     1: /* $NetBSD: ix_txrx.c,v 1.54 2019/07/04 08:56:35 msaitoh Exp $ */
1.28      msaitoh     2:
1.1       msaitoh     3: /******************************************************************************
                      4:
1.28      msaitoh     5:   Copyright (c) 2001-2017, Intel Corporation
1.1       msaitoh     6:   All rights reserved.
1.28      msaitoh     7:
                      8:   Redistribution and use in source and binary forms, with or without
1.1       msaitoh     9:   modification, are permitted provided that the following conditions are met:
1.28      msaitoh    10:
                     11:    1. Redistributions of source code must retain the above copyright notice,
1.1       msaitoh    12:       this list of conditions and the following disclaimer.
1.28      msaitoh    13:
                     14:    2. Redistributions in binary form must reproduce the above copyright
                     15:       notice, this list of conditions and the following disclaimer in the
1.1       msaitoh    16:       documentation and/or other materials provided with the distribution.
1.28      msaitoh    17:
                     18:    3. Neither the name of the Intel Corporation nor the names of its
                     19:       contributors may be used to endorse or promote products derived from
1.1       msaitoh    20:       this software without specific prior written permission.
1.28      msaitoh    21:
1.1       msaitoh    22:   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1.28      msaitoh    23:   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     24:   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     25:   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
                     26:   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     27:   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     28:   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     29:   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     30:   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1.1       msaitoh    31:   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     32:   POSSIBILITY OF SUCH DAMAGE.
                     33:
                     34: ******************************************************************************/
1.39      msaitoh    35: /*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/
1.28      msaitoh    36:
1.1       msaitoh    37: /*
                     38:  * Copyright (c) 2011 The NetBSD Foundation, Inc.
                     39:  * All rights reserved.
                     40:  *
                     41:  * This code is derived from software contributed to The NetBSD Foundation
                     42:  * by Coyote Point Systems, Inc.
                     43:  *
                     44:  * Redistribution and use in source and binary forms, with or without
                     45:  * modification, are permitted provided that the following conditions
                     46:  * are met:
                     47:  * 1. Redistributions of source code must retain the above copyright
                     48:  *    notice, this list of conditions and the following disclaimer.
                     49:  * 2. Redistributions in binary form must reproduce the above copyright
                     50:  *    notice, this list of conditions and the following disclaimer in the
                     51:  *    documentation and/or other materials provided with the distribution.
                     52:  *
                     53:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     54:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     55:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     56:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     57:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     58:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     59:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     60:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     61:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     62:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     63:  * POSSIBILITY OF SUCH DAMAGE.
                     64:  */
                     65:
1.8       msaitoh    66: #include "opt_inet.h"
                     67: #include "opt_inet6.h"
                     68:
1.1       msaitoh    69: #include "ixgbe.h"
                     70:
                     71: /*
1.28      msaitoh    72:  * HW RSC control:
                     73:  *  This feature only works with
                     74:  *  IPv4, and only on 82599 and later.
                     75:  *  It also causes IP forwarding to fail,
                     76:  *  and unlike LRO that cannot be
                     77:  *  controlled by the stack. For all these
                     78:  *  reasons it is best left off, with no
                     79:  *  tunable interface; enabling it would
                     80:  *  require changing the value below and
                     81:  *  recompiling.
                     82:  */
1.1       msaitoh    83: static bool ixgbe_rsc_enable = FALSE;
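                         /*
                          * For example, to experiment with HW RSC one would change the
                          * initializer above and rebuild:
                          *
                          *     static bool ixgbe_rsc_enable = TRUE;
                          */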
                     84:
1.3       msaitoh    85: /*
1.28      msaitoh    86:  * For Flow Director: this is the
                     87:  * number of TX packets we sample
                     88:  * for the filter pool; at the default
                     89:  * of 20, every 20th packet is probed.
                     90:  *
                     91:  * This feature can be disabled by
                     92:  * setting this to 0.
                     93:  */
1.3       msaitoh    94: static int atr_sample_rate = 20;
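                         /*
                          * The counter is only consulted in ixgbe_xmit() when txr->atr_sample
                          * is non-zero, which is how a value of 0 disables sampling; see
                          * ixgbe_setup_transmit_ring(), which copies this value into each ring.
                          */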
                     95:
1.28      msaitoh    96: /************************************************************************
1.3       msaitoh    97:  *  Local Function prototypes
1.28      msaitoh    98:  ************************************************************************/
                     99: static void          ixgbe_setup_transmit_ring(struct tx_ring *);
                    100: static void          ixgbe_free_transmit_buffers(struct tx_ring *);
                    101: static int           ixgbe_setup_receive_ring(struct rx_ring *);
                    102: static void          ixgbe_free_receive_buffers(struct rx_ring *);
                    103: static void          ixgbe_rx_checksum(u32, struct mbuf *, u32,
                    104:                                        struct ixgbe_hw_stats *);
                    105: static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
1.38      knakahar  106: static void          ixgbe_drain(struct ifnet *, struct tx_ring *);
1.28      msaitoh   107: static int           ixgbe_xmit(struct tx_ring *, struct mbuf *);
                    108: static int           ixgbe_tx_ctx_setup(struct tx_ring *,
                    109:                                         struct mbuf *, u32 *, u32 *);
                    110: static int           ixgbe_tso_setup(struct tx_ring *,
                    111:                                      struct mbuf *, u32 *, u32 *);
1.1       msaitoh   112: static __inline void ixgbe_rx_discard(struct rx_ring *, int);
                    113: static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
1.28      msaitoh   114:                                     struct mbuf *, u32);
                    115: static int           ixgbe_dma_malloc(struct adapter *, bus_size_t,
                    116:                                       struct ixgbe_dma_alloc *, int);
                    117: static void          ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
1.1       msaitoh   118:
                    119: static void    ixgbe_setup_hw_rsc(struct rx_ring *);
                    120:
1.28      msaitoh   121: /************************************************************************
                    122:  * ixgbe_legacy_start_locked - Transmit entry point
1.1       msaitoh   123:  *
1.28      msaitoh   124:  *   Called by the stack to initiate a transmit.
                    125:  *   The driver will remain in this routine as long as there are
                    126:  *   packets to transmit and transmit resources are available.
                    127:  *   In case resources are not available, the stack is notified
                    128:  *   and the packet is requeued.
                    129:  ************************************************************************/
                    130: int
                    131: ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
1.1       msaitoh   132: {
1.45      msaitoh   133:        int rc;
1.1       msaitoh   134:        struct mbuf    *m_head;
                    135:        struct adapter *adapter = txr->adapter;
                    136:
                    137:        IXGBE_TX_LOCK_ASSERT(txr);
                    138:
1.52      msaitoh   139:        if (adapter->link_active != LINK_STATE_UP) {
1.38      knakahar  140:                /*
                    141:                 * Discard all packets buffered in the IFQ to avoid
                    142:                 * sending stale packets when the link comes up again.
                    143:                 */
                    144:                ixgbe_drain(ifp, txr);
                    145:                return (ENETDOWN);
                    146:        }
1.1       msaitoh   147:        if ((ifp->if_flags & IFF_RUNNING) == 0)
1.28      msaitoh   148:                return (ENETDOWN);
1.47      msaitoh   149:        if (txr->txr_no_space)
                    150:                return (ENETDOWN);
                    151:
1.1       msaitoh   152:        while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
                    153:                if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
                    154:                        break;
                    155:
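                                         /*
                                          * Poll first and dequeue only after ixgbe_xmit() accepts
                                          * the packet: on EAGAIN (no descriptors) the loop breaks
                                          * and the mbuf stays on if_snd for a later retry.
                                          */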
                    156:                IFQ_POLL(&ifp->if_snd, m_head);
                    157:                if (m_head == NULL)
                    158:                        break;
                    159:
                    160:                if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
                    161:                        break;
                    162:                }
                    163:                IFQ_DEQUEUE(&ifp->if_snd, m_head);
                    164:                if (rc != 0) {
                    165:                        m_freem(m_head);
                    166:                        continue;
                    167:                }
                    168:
                    169:                /* Send a copy of the frame to the BPF listener */
1.48      msaitoh   170:                bpf_mtap(ifp, m_head, BPF_D_OUT);
1.1       msaitoh   171:        }
1.44      msaitoh   172:
1.28      msaitoh   173:        return IXGBE_SUCCESS;
                    174: } /* ixgbe_legacy_start_locked */
                    175:
                    176: /************************************************************************
                    177:  * ixgbe_legacy_start
                    178:  *
                    179:  *   Called by the stack, this always uses the first tx ring,
                    180:  *   and should not be used with multiqueue tx enabled.
                    181:  ************************************************************************/
1.1       msaitoh   182: void
1.28      msaitoh   183: ixgbe_legacy_start(struct ifnet *ifp)
1.1       msaitoh   184: {
                    185:        struct adapter *adapter = ifp->if_softc;
1.28      msaitoh   186:        struct tx_ring *txr = adapter->tx_rings;
1.1       msaitoh   187:
                    188:        if (ifp->if_flags & IFF_RUNNING) {
                    189:                IXGBE_TX_LOCK(txr);
1.28      msaitoh   190:                ixgbe_legacy_start_locked(ifp, txr);
1.1       msaitoh   191:                IXGBE_TX_UNLOCK(txr);
                    192:        }
1.28      msaitoh   193: } /* ixgbe_legacy_start */
1.1       msaitoh   194:
1.28      msaitoh   195: /************************************************************************
                    196:  * ixgbe_mq_start - Multiqueue Transmit Entry Point
                    197:  *
                    198:  *   (if_transmit function)
                    199:  ************************************************************************/
1.1       msaitoh   200: int
                    201: ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
                    202: {
                    203:        struct adapter  *adapter = ifp->if_softc;
                    204:        struct tx_ring  *txr;
1.50      msaitoh   205:        int             i;
1.28      msaitoh   206: #ifdef RSS
1.1       msaitoh   207:        uint32_t bucket_id;
                    208: #endif
                    209:
                    210:        /*
                    211:         * When doing RSS, map it to the same outbound queue
                    212:         * as the incoming flow would be mapped to.
                    213:         *
                    214:         * If everything is set up correctly, it should be the
                    215:         * same bucket that the current CPU maps to.
                    216:         */
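                                 /*
                                  * For example, with RSS and num_queues = 8, a flow hashed to
                                  * bucket 11 is sent on tx ring 11 % 8 = 3; assuming the RX
                                  * indirection table uses the same modulo, that matches the ring
                                  * its receive traffic arrives on.
                                  */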
1.28      msaitoh   217: #ifdef RSS
1.1       msaitoh   218:        if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
1.28      msaitoh   219:                if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
                    220:                    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
                    221:                    &bucket_id) == 0)) {
1.1       msaitoh   222:                        i = bucket_id % adapter->num_queues;
1.8       msaitoh   223: #ifdef IXGBE_DEBUG
                    224:                        if (bucket_id > adapter->num_queues)
1.28      msaitoh   225:                                if_printf(ifp,
                    226:                                    "bucket_id (%d) > num_queues (%d)\n",
                    227:                                    bucket_id, adapter->num_queues);
1.8       msaitoh   228: #endif
                    229:                } else
1.1       msaitoh   230:                        i = m->m_pkthdr.flowid % adapter->num_queues;
1.3       msaitoh   231:        } else
1.28      msaitoh   232: #endif /* RSS */
1.51      knakahar  233:                i = (cpu_index(curcpu()) % ncpu) % adapter->num_queues;
1.3       msaitoh   234:
                    235:        /* Check for a hung queue and pick alternative */
1.54      msaitoh   236:        if (((1ULL << i) & adapter->active_queues) == 0)
1.18      msaitoh   237:                i = ffs64(adapter->active_queues);
1.1       msaitoh   238:
                    239:        txr = &adapter->tx_rings[i];
                    240:
1.50      msaitoh   241:        if (__predict_false(!pcq_put(txr->txr_interq, m))) {
1.18      msaitoh   242:                m_freem(m);
                    243:                txr->pcq_drops.ev_count++;
1.50      msaitoh   244:                return ENOBUFS;
1.18      msaitoh   245:        }
1.1       msaitoh   246:        if (IXGBE_TX_TRYLOCK(txr)) {
                    247:                ixgbe_mq_start_locked(ifp, txr);
                    248:                IXGBE_TX_UNLOCK(txr);
1.34      knakahar  249:        } else {
                    250:                if (adapter->txrx_use_workqueue) {
1.44      msaitoh   251:                        u_int *enqueued;
                    252:
1.34      knakahar  253:                        /*
                    254:                         * This function itself is not called in interrupt
                    255:                         * context, but it can be called in fast softint
                    256:                         * context right after receiving forwarded packets.
                    257:                         * The workqueue must therefore be protected against
                    258:                         * double enqueueing when the machine handles both
                    259:                         * locally generated and forwarded packets.
                    260:                         */
1.44      msaitoh   261:                        enqueued = percpu_getref(adapter->txr_wq_enqueued);
1.34      knakahar  262:                        if (*enqueued == 0) {
                    263:                                *enqueued = 1;
                    264:                                percpu_putref(adapter->txr_wq_enqueued);
1.44      msaitoh   265:                                workqueue_enqueue(adapter->txr_wq,
                    266:                                    &txr->wq_cookie, curcpu());
1.34      knakahar  267:                        } else
                    268:                                percpu_putref(adapter->txr_wq_enqueued);
                    269:                } else
                    270:                        softint_schedule(txr->txr_si);
                    271:        }
1.1       msaitoh   272:
                    273:        return (0);
1.28      msaitoh   274: } /* ixgbe_mq_start */
1.1       msaitoh   275:
1.28      msaitoh   276: /************************************************************************
                    277:  * ixgbe_mq_start_locked
                    278:  ************************************************************************/
1.1       msaitoh   279: int
                    280: ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
                    281: {
1.28      msaitoh   282:        struct mbuf    *next;
                    283:        int            enqueued = 0, err = 0;
1.1       msaitoh   284:
1.52      msaitoh   285:        if (txr->adapter->link_active != LINK_STATE_UP) {
1.38      knakahar  286:                /*
                    287:                 * Discard all packets buffered in txr_interq to avoid
                    288:                 * sending stale packets when the link comes up again.
                    289:                 */
                    290:                ixgbe_drain(ifp, txr);
                    291:                return (ENETDOWN);
                    292:        }
1.28      msaitoh   293:        if ((ifp->if_flags & IFF_RUNNING) == 0)
                    294:                return (ENETDOWN);
1.47      msaitoh   295:        if (txr->txr_no_space)
                    296:                return (ENETDOWN);
1.1       msaitoh   297:
                    298:        /* Process the queue */
1.18      msaitoh   299:        while ((next = pcq_get(txr->txr_interq)) != NULL) {
                    300:                if ((err = ixgbe_xmit(txr, next)) != 0) {
                    301:                        m_freem(next);
                    302:                        /* All errors are counted in ixgbe_xmit() */
1.1       msaitoh   303:                        break;
                    304:                }
                    305:                enqueued++;
1.3       msaitoh   306: #if __FreeBSD_version >= 1100036
1.4       msaitoh   307:                /*
                    308:                 * Since we're looking at the tx ring, we can check
                    309:                 * to see if we're a VF by examining our tail register
                    310:                 * address.
                    311:                 */
1.28      msaitoh   312:                if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
                    313:                    (next->m_flags & M_MCAST))
1.3       msaitoh   314:                        if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
                    315: #endif
1.1       msaitoh   316:                /* Send a copy of the frame to the BPF listener */
1.48      msaitoh   317:                bpf_mtap(ifp, next, BPF_D_OUT);
1.1       msaitoh   318:                if ((ifp->if_flags & IFF_RUNNING) == 0)
                    319:                        break;
                    320:        }
                    321:
1.28      msaitoh   322:        if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
1.1       msaitoh   323:                ixgbe_txeof(txr);
                    324:
                    325:        return (err);
1.28      msaitoh   326: } /* ixgbe_mq_start_locked */
1.1       msaitoh   327:
1.28      msaitoh   328: /************************************************************************
                    329:  * ixgbe_deferred_mq_start
                    330:  *
1.34      knakahar  331:  *   Called from a softint and workqueue (indirectly) to drain queued
                    332:  *   transmit packets.
1.28      msaitoh   333:  ************************************************************************/
1.1       msaitoh   334: void
1.18      msaitoh   335: ixgbe_deferred_mq_start(void *arg)
1.1       msaitoh   336: {
                    337:        struct tx_ring *txr = arg;
                    338:        struct adapter *adapter = txr->adapter;
1.28      msaitoh   339:        struct ifnet   *ifp = adapter->ifp;
1.1       msaitoh   340:
                    341:        IXGBE_TX_LOCK(txr);
1.18      msaitoh   342:        if (pcq_peek(txr->txr_interq) != NULL)
1.1       msaitoh   343:                ixgbe_mq_start_locked(ifp, txr);
                    344:        IXGBE_TX_UNLOCK(txr);
1.28      msaitoh   345: } /* ixgbe_deferred_mq_start */
1.3       msaitoh   346:
1.28      msaitoh   347: /************************************************************************
1.34      knakahar  348:  * ixgbe_deferred_mq_start_work
                    349:  *
                    350:  *   Called from a workqueue to drain queued transmit packets.
                    351:  ************************************************************************/
                    352: void
                    353: ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
                    354: {
                    355:        struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
                    356:        struct adapter *adapter = txr->adapter;
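                                 /*
                                  * Clear the per-CPU flag set in ixgbe_mq_start() so that this
                                  * queue's work can be enqueued on the workqueue again.
                                  */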
                    357:        u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);
                    358:        *enqueued = 0;
                    359:        percpu_putref(adapter->txr_wq_enqueued);
                    360:
                    361:        ixgbe_deferred_mq_start(txr);
                    362: } /* ixgbe_deferred_mq_start_work */
                    363:
1.38      knakahar  364: /************************************************************************
                    365:  * ixgbe_drain_all
                    366:  ************************************************************************/
                    367: void
                    368: ixgbe_drain_all(struct adapter *adapter)
                    369: {
                    370:        struct ifnet *ifp = adapter->ifp;
                    371:        struct ix_queue *que = adapter->queues;
                    372:
                    373:        for (int i = 0; i < adapter->num_queues; i++, que++) {
                    374:                struct tx_ring  *txr = que->txr;
                    375:
                    376:                IXGBE_TX_LOCK(txr);
                    377:                ixgbe_drain(ifp, txr);
                    378:                IXGBE_TX_UNLOCK(txr);
                    379:        }
                    380: }
1.34      knakahar  381:
                    382: /************************************************************************
1.28      msaitoh   383:  * ixgbe_xmit
1.1       msaitoh   384:  *
1.28      msaitoh   385:  *   Maps the mbufs to tx descriptors, allowing the
                    386:  *   TX engine to transmit the packets.
1.1       msaitoh   387:  *
1.28      msaitoh   388:  *   Return 0 on success, positive on failure
                    389:  ************************************************************************/
1.1       msaitoh   390: static int
                    391: ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
                    392: {
1.28      msaitoh   393:        struct adapter          *adapter = txr->adapter;
                    394:        struct ixgbe_tx_buf     *txbuf;
1.1       msaitoh   395:        union ixgbe_adv_tx_desc *txd = NULL;
1.28      msaitoh   396:        struct ifnet            *ifp = adapter->ifp;
                    397:        int                     i, j, error;
                    398:        int                     first;
                    399:        u32                     olinfo_status = 0, cmd_type_len;
                    400:        bool                    remap = TRUE;
                    401:        bus_dmamap_t            map;
1.1       msaitoh   402:
                    403:        /* Basic descriptor defines */
1.28      msaitoh   404:        cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1.1       msaitoh   405:            IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
                    406:
1.29      knakahar  407:        if (vlan_has_tag(m_head))
1.28      msaitoh   408:                cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1.1       msaitoh   409:
1.28      msaitoh   410:        /*
                    411:         * Important to capture the first descriptor
                    412:         * used because it will contain the index of
                    413:         * the one we tell the hardware to report back
                    414:         */
                    415:        first = txr->next_avail_desc;
1.1       msaitoh   416:        txbuf = &txr->tx_buffers[first];
                    417:        map = txbuf->map;
                    418:
                    419:        /*
                    420:         * Map the packet for DMA.
                    421:         */
1.22      msaitoh   422: retry:
1.28      msaitoh   423:        error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
                    424:            BUS_DMA_NOWAIT);
1.1       msaitoh   425:
                    426:        if (__predict_false(error)) {
1.22      msaitoh   427:                struct mbuf *m;
1.1       msaitoh   428:
                    429:                switch (error) {
                    430:                case EAGAIN:
1.35      msaitoh   431:                        txr->q_eagain_tx_dma_setup++;
1.1       msaitoh   432:                        return EAGAIN;
                    433:                case ENOMEM:
1.35      msaitoh   434:                        txr->q_enomem_tx_dma_setup++;
1.1       msaitoh   435:                        return EAGAIN;
                    436:                case EFBIG:
1.22      msaitoh   437:                        /* Try it again? - one try */
                    438:                        if (remap == TRUE) {
                    439:                                remap = FALSE;
                    440:                                /*
                    441:                                 * XXX: m_defrag will choke on
                    442:                                 * non-MCLBYTES-sized clusters
                    443:                                 */
1.35      msaitoh   444:                                txr->q_efbig_tx_dma_setup++;
1.22      msaitoh   445:                                m = m_defrag(m_head, M_NOWAIT);
                    446:                                if (m == NULL) {
1.35      msaitoh   447:                                        txr->q_mbuf_defrag_failed++;
1.22      msaitoh   448:                                        return ENOBUFS;
                    449:                                }
                    450:                                m_head = m;
                    451:                                goto retry;
                    452:                        } else {
1.35      msaitoh   453:                                txr->q_efbig2_tx_dma_setup++;
1.22      msaitoh   454:                                return error;
                    455:                        }
1.1       msaitoh   456:                case EINVAL:
1.35      msaitoh   457:                        txr->q_einval_tx_dma_setup++;
1.1       msaitoh   458:                        return error;
                    459:                default:
1.35      msaitoh   460:                        txr->q_other_tx_dma_setup++;
1.1       msaitoh   461:                        return error;
                    462:                }
                    463:        }
                    464:
                    465:        /* Make certain there are enough descriptors */
1.10      msaitoh   466:        if (txr->tx_avail < (map->dm_nsegs + 2)) {
1.47      msaitoh   467:                txr->txr_no_space = true;
1.1       msaitoh   468:                txr->no_desc_avail.ev_count++;
                    469:                ixgbe_dmamap_unload(txr->txtag, txbuf->map);
                    470:                return EAGAIN;
                    471:        }
                    472:
                    473:        /*
1.4       msaitoh   474:         * Set up the appropriate offload context;
                    475:         * this will consume the first descriptor
                    476:         */
1.1       msaitoh   477:        error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
                    478:        if (__predict_false(error)) {
                    479:                return (error);
                    480:        }
                    481:
                    482:        /* Do the flow director magic */
1.28      msaitoh   483:        if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
                    484:            (txr->atr_sample) && (!adapter->fdir_reinit)) {
1.1       msaitoh   485:                ++txr->atr_count;
                    486:                if (txr->atr_count >= atr_sample_rate) {
                    487:                        ixgbe_atr(txr, m_head);
                    488:                        txr->atr_count = 0;
                    489:                }
                    490:        }
                    491:
1.8       msaitoh   492:        olinfo_status |= IXGBE_ADVTXD_CC;
1.1       msaitoh   493:        i = txr->next_avail_desc;
                    494:        for (j = 0; j < map->dm_nsegs; j++) {
                    495:                bus_size_t seglen;
                    496:                bus_addr_t segaddr;
                    497:
                    498:                txbuf = &txr->tx_buffers[i];
                    499:                txd = &txr->tx_base[i];
                    500:                seglen = map->dm_segs[j].ds_len;
                    501:                segaddr = htole64(map->dm_segs[j].ds_addr);
                    502:
                    503:                txd->read.buffer_addr = segaddr;
1.40      msaitoh   504:                txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
1.1       msaitoh   505:                txd->read.olinfo_status = htole32(olinfo_status);
                    506:
                    507:                if (++i == txr->num_desc)
                    508:                        i = 0;
                    509:        }
                    510:
1.28      msaitoh   511:        txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1.1       msaitoh   512:        txr->tx_avail -= map->dm_nsegs;
                    513:        txr->next_avail_desc = i;
                    514:
                    515:        txbuf->m_head = m_head;
                    516:        /*
1.4       msaitoh   517:         * Here we swap the map so the last descriptor,
                    518:         * which gets the completion interrupt, has the
                    519:         * real map, and the first descriptor gets the
                    520:         * unused map from this descriptor.
                    521:         */
1.1       msaitoh   522:        txr->tx_buffers[first].map = txbuf->map;
                    523:        txbuf->map = map;
                    524:        bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
                    525:            BUS_DMASYNC_PREWRITE);
                    526:
1.28      msaitoh   527:        /* Set the EOP descriptor that will be marked done */
                    528:        txbuf = &txr->tx_buffers[first];
1.1       msaitoh   529:        txbuf->eop = txd;
                    530:
1.28      msaitoh   531:        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1.1       msaitoh   532:            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                    533:        /*
                    534:         * Advance the Transmit Descriptor Tail (Tdt), this tells the
                    535:         * hardware that this frame is available to transmit.
                    536:         */
                    537:        ++txr->total_packets.ev_count;
1.3       msaitoh   538:        IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
                    539:
1.23      msaitoh   540:        /*
                    541:         * XXXX NOMPSAFE: ifp->if_data should be percpu.
                    542:         */
                    543:        ifp->if_obytes += m_head->m_pkthdr.len;
                    544:        if (m_head->m_flags & M_MCAST)
                    545:                ifp->if_omcasts++;
                    546:
1.45      msaitoh   547:        /* Mark queue as having work */
                    548:        if (txr->busy == 0)
                    549:                txr->busy = 1;
                    550:
1.28      msaitoh   551:        return (0);
                    552: } /* ixgbe_xmit */
1.1       msaitoh   553:
1.38      knakahar  554: /************************************************************************
                    555:  * ixgbe_drain
                    556:  ************************************************************************/
                    557: static void
                    558: ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
                    559: {
                    560:        struct mbuf *m;
                    561:
                    562:        IXGBE_TX_LOCK_ASSERT(txr);
                    563:
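                                 /*
                                  * Only the first ring services the legacy if_snd queue (see
                                  * ixgbe_legacy_start()), so drain it from ring 0 alone.
                                  */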
                    564:        if (txr->me == 0) {
                    565:                while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
                    566:                        IFQ_DEQUEUE(&ifp->if_snd, m);
                    567:                        m_freem(m);
                    568:                        IF_DROP(&ifp->if_snd);
                    569:                }
                    570:        }
                    571:
                    572:        while ((m = pcq_get(txr->txr_interq)) != NULL) {
                    573:                m_freem(m);
                    574:                txr->pcq_drops.ev_count++;
                    575:        }
                    576: }
1.16      msaitoh   577:
1.28      msaitoh   578: /************************************************************************
                    579:  * ixgbe_allocate_transmit_buffers
1.1       msaitoh   580:  *
1.28      msaitoh   581:  *   Allocate memory for tx_buffer structures. The tx_buffer stores all
                    582:  *   the information needed to transmit a packet on the wire. This is
                    583:  *   called only once at attach; setup is done on every reset.
                    584:  ************************************************************************/
                    585: static int
1.1       msaitoh   586: ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
                    587: {
1.28      msaitoh   588:        struct adapter      *adapter = txr->adapter;
                    589:        device_t            dev = adapter->dev;
1.1       msaitoh   590:        struct ixgbe_tx_buf *txbuf;
1.28      msaitoh   591:        int                 error, i;
1.1       msaitoh   592:
                    593:        /*
                    594:         * Setup DMA descriptor areas.
                    595:         */
1.28      msaitoh   596:        error = ixgbe_dma_tag_create(
                    597:                 /*      parent */ adapter->osdep.dmat,
                    598:                 /*   alignment */ 1,
                    599:                 /*      bounds */ 0,
                    600:                 /*     maxsize */ IXGBE_TSO_SIZE,
                    601:                 /*   nsegments */ adapter->num_segs,
                    602:                 /*  maxsegsize */ PAGE_SIZE,
                    603:                 /*       flags */ 0,
                    604:                                   &txr->txtag);
                    605:        if (error != 0) {
1.1       msaitoh   606:                aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
                    607:                goto fail;
                    608:        }
                    609:
1.28      msaitoh   610:        txr->tx_buffers =
1.1       msaitoh   611:            (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
1.28      msaitoh   612:            adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
                    613:        if (txr->tx_buffers == NULL) {
1.1       msaitoh   614:                aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
                    615:                error = ENOMEM;
                    616:                goto fail;
                    617:        }
                    618:
1.28      msaitoh   619:        /* Create the descriptor buffer dma maps */
1.1       msaitoh   620:        txbuf = txr->tx_buffers;
                    621:        for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
                    622:                error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
                    623:                if (error != 0) {
                    624:                        aprint_error_dev(dev,
                    625:                            "Unable to create TX DMA map (%d)\n", error);
                    626:                        goto fail;
                    627:                }
                    628:        }
                    629:
                    630:        return 0;
                    631: fail:
                    632:        /* We free all, it handles case where we are in the middle */
1.15      msaitoh   633: #if 0 /* XXX was FreeBSD */
1.1       msaitoh   634:        ixgbe_free_transmit_structures(adapter);
1.15      msaitoh   635: #else
                    636:        ixgbe_free_transmit_buffers(txr);
                    637: #endif
1.1       msaitoh   638:        return (error);
1.28      msaitoh   639: } /* ixgbe_allocate_transmit_buffers */
1.1       msaitoh   640:
1.28      msaitoh   641: /************************************************************************
                    642:  * ixgbe_setup_transmit_ring - Initialize a transmit ring.
                    643:  ************************************************************************/
1.1       msaitoh   644: static void
                    645: ixgbe_setup_transmit_ring(struct tx_ring *txr)
                    646: {
1.28      msaitoh   647:        struct adapter        *adapter = txr->adapter;
                    648:        struct ixgbe_tx_buf   *txbuf;
1.1       msaitoh   649: #ifdef DEV_NETMAP
                    650:        struct netmap_adapter *na = NA(adapter->ifp);
1.28      msaitoh   651:        struct netmap_slot    *slot;
1.1       msaitoh   652: #endif /* DEV_NETMAP */
                    653:
                    654:        /* Clear the old ring contents */
                    655:        IXGBE_TX_LOCK(txr);
1.28      msaitoh   656:
1.1       msaitoh   657: #ifdef DEV_NETMAP
1.28      msaitoh   658:        if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
                    659:                /*
                    660:                 * (under lock): if in netmap mode, do some consistency
                    661:                 * checks and set slot to entry 0 of the netmap ring.
                    662:                 */
                    663:                slot = netmap_reset(na, NR_TX, txr->me, 0);
                    664:        }
1.1       msaitoh   665: #endif /* DEV_NETMAP */
1.28      msaitoh   666:
1.1       msaitoh   667:        bzero((void *)txr->tx_base,
1.28      msaitoh   668:            (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
1.1       msaitoh   669:        /* Reset indices */
                    670:        txr->next_avail_desc = 0;
                    671:        txr->next_to_clean = 0;
                    672:
                    673:        /* Free any existing tx buffers. */
1.28      msaitoh   674:        txbuf = txr->tx_buffers;
1.5       msaitoh   675:        for (int i = 0; i < txr->num_desc; i++, txbuf++) {
1.1       msaitoh   676:                if (txbuf->m_head != NULL) {
                    677:                        bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
                    678:                            0, txbuf->m_head->m_pkthdr.len,
                    679:                            BUS_DMASYNC_POSTWRITE);
                    680:                        ixgbe_dmamap_unload(txr->txtag, txbuf->map);
                    681:                        m_freem(txbuf->m_head);
                    682:                        txbuf->m_head = NULL;
                    683:                }
1.28      msaitoh   684:
1.1       msaitoh   685: #ifdef DEV_NETMAP
                    686:                /*
                    687:                 * In netmap mode, set the map for the packet buffer.
                    688:                 * NOTE: Some drivers (not this one) also need to set
                    689:                 * the physical buffer address in the NIC ring.
                    690:                 * Slots in the netmap ring (indexed by "si") are
                    691:                 * kring->nkr_hwofs positions "ahead" wrt the
                    692:                 * corresponding slot in the NIC ring. In some drivers
                    693:                 * (not here) nkr_hwofs can be negative. Function
                    694:                 * netmap_idx_n2k() handles wraparounds properly.
                    695:                 */
1.28      msaitoh   696:                if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1.53      msaitoh   697:                        int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
1.5       msaitoh   698:                        netmap_load_map(na, txr->txtag,
                    699:                            txbuf->map, NMB(na, slot + si));
1.1       msaitoh   700:                }
                    701: #endif /* DEV_NETMAP */
1.28      msaitoh   702:
1.1       msaitoh   703:                /* Clear the EOP descriptor pointer */
                    704:                txbuf->eop = NULL;
1.28      msaitoh   705:        }
1.1       msaitoh   706:
                    707:        /* Set the rate at which we sample packets */
1.28      msaitoh   708:        if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1.1       msaitoh   709:                txr->atr_sample = atr_sample_rate;
                    710:
                    711:        /* Set number of descriptors available */
                    712:        txr->tx_avail = adapter->num_tx_desc;
                    713:
                    714:        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
                    715:            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                    716:        IXGBE_TX_UNLOCK(txr);
1.28      msaitoh   717: } /* ixgbe_setup_transmit_ring */
1.1       msaitoh   718:
1.28      msaitoh   719: /************************************************************************
                    720:  * ixgbe_setup_transmit_structures - Initialize all transmit rings.
                    721:  ************************************************************************/
1.1       msaitoh   722: int
                    723: ixgbe_setup_transmit_structures(struct adapter *adapter)
                    724: {
                    725:        struct tx_ring *txr = adapter->tx_rings;
                    726:
                    727:        for (int i = 0; i < adapter->num_queues; i++, txr++)
                    728:                ixgbe_setup_transmit_ring(txr);
                    729:
                    730:        return (0);
1.28      msaitoh   731: } /* ixgbe_setup_transmit_structures */
1.1       msaitoh   732:
1.28      msaitoh   733: /************************************************************************
                    734:  * ixgbe_free_transmit_structures - Free all transmit rings.
                    735:  ************************************************************************/
1.1       msaitoh   736: void
                    737: ixgbe_free_transmit_structures(struct adapter *adapter)
                    738: {
                    739:        struct tx_ring *txr = adapter->tx_rings;
                    740:
                    741:        for (int i = 0; i < adapter->num_queues; i++, txr++) {
                    742:                ixgbe_free_transmit_buffers(txr);
                    743:                ixgbe_dma_free(adapter, &txr->txdma);
                    744:                IXGBE_TX_LOCK_DESTROY(txr);
                    745:        }
                    746:        free(adapter->tx_rings, M_DEVBUF);
1.28      msaitoh   747: } /* ixgbe_free_transmit_structures */
1.1       msaitoh   748:
1.28      msaitoh   749: /************************************************************************
                    750:  * ixgbe_free_transmit_buffers
1.1       msaitoh   751:  *
1.28      msaitoh   752:  *   Free transmit ring related data structures.
                    753:  ************************************************************************/
1.1       msaitoh   754: static void
                    755: ixgbe_free_transmit_buffers(struct tx_ring *txr)
                    756: {
1.28      msaitoh   757:        struct adapter      *adapter = txr->adapter;
1.1       msaitoh   758:        struct ixgbe_tx_buf *tx_buffer;
1.28      msaitoh   759:        int                 i;
1.1       msaitoh   760:
1.14      msaitoh   761:        INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");
1.1       msaitoh   762:
                    763:        if (txr->tx_buffers == NULL)
                    764:                return;
                    765:
                    766:        tx_buffer = txr->tx_buffers;
                    767:        for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
                    768:                if (tx_buffer->m_head != NULL) {
                    769:                        bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
                    770:                            0, tx_buffer->m_head->m_pkthdr.len,
                    771:                            BUS_DMASYNC_POSTWRITE);
                    772:                        ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
                    773:                        m_freem(tx_buffer->m_head);
                    774:                        tx_buffer->m_head = NULL;
                    775:                        if (tx_buffer->map != NULL) {
                    776:                                ixgbe_dmamap_destroy(txr->txtag,
                    777:                                    tx_buffer->map);
                    778:                                tx_buffer->map = NULL;
                    779:                        }
                    780:                } else if (tx_buffer->map != NULL) {
                    781:                        ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
                    782:                        ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
                    783:                        tx_buffer->map = NULL;
                    784:                }
                    785:        }
1.18      msaitoh   786:        if (txr->txr_interq != NULL) {
                    787:                struct mbuf *m;
                    788:
                    789:                while ((m = pcq_get(txr->txr_interq)) != NULL)
                    790:                        m_freem(m);
                    791:                pcq_destroy(txr->txr_interq);
                    792:        }
1.1       msaitoh   793:        if (txr->tx_buffers != NULL) {
                    794:                free(txr->tx_buffers, M_DEVBUF);
                    795:                txr->tx_buffers = NULL;
                    796:        }
                    797:        if (txr->txtag != NULL) {
                    798:                ixgbe_dma_tag_destroy(txr->txtag);
                    799:                txr->txtag = NULL;
                    800:        }
1.28      msaitoh   801: } /* ixgbe_free_transmit_buffers */
1.1       msaitoh   802:
1.28      msaitoh   803: /************************************************************************
                    804:  * ixgbe_tx_ctx_setup
1.1       msaitoh   805:  *
1.28      msaitoh   806:  *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
                    807:  ************************************************************************/
1.1       msaitoh   808: static int
                    809: ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
                    810:     u32 *cmd_type_len, u32 *olinfo_status)
                    811: {
1.28      msaitoh   812:        struct adapter                   *adapter = txr->adapter;
1.1       msaitoh   813:        struct ixgbe_adv_tx_context_desc *TXD;
1.28      msaitoh   814:        struct ether_vlan_header         *eh;
1.8       msaitoh   815: #ifdef INET
1.28      msaitoh   816:        struct ip                        *ip;
1.8       msaitoh   817: #endif
                    818: #ifdef INET6
1.28      msaitoh   819:        struct ip6_hdr                   *ip6;
1.8       msaitoh   820: #endif
1.28      msaitoh   821:        int                              ehdrlen, ip_hlen = 0;
                    822:        int                              offload = TRUE;
                    823:        int                              ctxd = txr->next_avail_desc;
                    824:        u32                              vlan_macip_lens = 0;
                    825:        u32                              type_tucmd_mlhl = 0;
                    826:        u16                              vtag = 0;
                    827:        u16                              etype;
                    828:        u8                               ipproto = 0;
                    829:        char                             *l3d;
1.8       msaitoh   830:
1.1       msaitoh   831:
                    832:        /* First check if TSO is to be used */
1.28      msaitoh   833:        if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
1.17      msaitoh   834:                int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);
                    835:
1.21      msaitoh   836:                if (rv != 0)
1.17      msaitoh   837:                        ++adapter->tso_err.ev_count;
1.21      msaitoh   838:                return rv;
1.17      msaitoh   839:        }
1.1       msaitoh   840:
                    841:        if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
                    842:                offload = FALSE;
                    843:
                    844:        /* Indicate the whole packet as payload when not doing TSO */
1.28      msaitoh   845:        *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
1.1       msaitoh   846:
                    847:        /* Now ready a context descriptor */
1.28      msaitoh   848:        TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
1.1       msaitoh   849:
                    850:        /*
1.28      msaitoh   851:         * In advanced descriptors the vlan tag must
                    852:         * be placed into the context descriptor. Hence
                    853:         * we need to make one even if not doing offloads.
                    854:         */
1.29      knakahar  855:        if (vlan_has_tag(mp)) {
                    856:                vtag = htole16(vlan_get_tag(mp));
1.1       msaitoh   857:                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
1.28      msaitoh   858:        } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
                    859:                   (offload == FALSE))
1.4       msaitoh   860:                return (0);
1.1       msaitoh   861:
                    862:        /*
                    863:         * Determine where frame payload starts.
                    864:         * Jump over vlan headers if already present,
                    865:         * helpful for QinQ too.
                    866:         */
                    867:        KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
                    868:        eh = mtod(mp, struct ether_vlan_header *);
                    869:        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                    870:                KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
                    871:                etype = ntohs(eh->evl_proto);
                    872:                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                    873:        } else {
                    874:                etype = ntohs(eh->evl_encap_proto);
                    875:                ehdrlen = ETHER_HDR_LEN;
                    876:        }
                    877:
                    878:        /* Set the ether header length */
                    879:        vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
                    880:
1.3       msaitoh   881:        if (offload == FALSE)
                    882:                goto no_offloads;
                    883:
1.8       msaitoh   884:        /*
1.28      msaitoh   885:         * If the first mbuf only includes the ethernet header,
                    886:         * jump to the next one
                    887:         * XXX: This assumes the stack splits mbufs containing headers
                    888:         *      on header boundaries
1.8       msaitoh   889:         * XXX: And assumes the entire IP header is contained in one mbuf
                    890:         */
                    891:        if (mp->m_len == ehdrlen && mp->m_next)
                    892:                l3d = mtod(mp->m_next, char *);
                    893:        else
                    894:                l3d = mtod(mp, char *) + ehdrlen;
                    895:
1.1       msaitoh   896:        switch (etype) {
1.9       msaitoh   897: #ifdef INET
1.1       msaitoh   898:        case ETHERTYPE_IP:
1.8       msaitoh   899:                ip = (struct ip *)(l3d);
                    900:                ip_hlen = ip->ip_hl << 2;
                    901:                ipproto = ip->ip_p;
                    902:                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
1.1       msaitoh   903:                KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
1.8       msaitoh   904:                    ip->ip_sum == 0);
1.1       msaitoh   905:                break;
1.9       msaitoh   906: #endif
                    907: #ifdef INET6
1.1       msaitoh   908:        case ETHERTYPE_IPV6:
1.8       msaitoh   909:                ip6 = (struct ip6_hdr *)(l3d);
                    910:                ip_hlen = sizeof(struct ip6_hdr);
                    911:                ipproto = ip6->ip6_nxt;
1.1       msaitoh   912:                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
                    913:                break;
1.9       msaitoh   914: #endif
1.1       msaitoh   915:        default:
1.11      msaitoh   916:                offload = false;
1.1       msaitoh   917:                break;
                    918:        }
                    919:
                    920:        if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
                    921:                *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
                    922:
                    923:        vlan_macip_lens |= ip_hlen;
                    924:
1.8       msaitoh   925:        /* No support for offloads for non-L4 next headers */
                    926:        switch (ipproto) {
1.36      msaitoh   927:        case IPPROTO_TCP:
                    928:                if (mp->m_pkthdr.csum_flags &
                    929:                    (M_CSUM_TCPv4 | M_CSUM_TCPv6))
                    930:                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                    931:                else
                    932:                        offload = false;
                    933:                break;
                    934:        case IPPROTO_UDP:
                    935:                if (mp->m_pkthdr.csum_flags &
                    936:                    (M_CSUM_UDPv4 | M_CSUM_UDPv6))
                    937:                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
                    938:                else
1.11      msaitoh   939:                        offload = false;
1.36      msaitoh   940:                break;
                    941:        default:
                    942:                offload = false;
                    943:                break;
1.8       msaitoh   944:        }
                    945:
                    946:        if (offload) /* Insert L4 checksum into data descriptors */
1.1       msaitoh   947:                *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
                    948:
1.3       msaitoh   949: no_offloads:
                    950:        type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
                    951:
1.1       msaitoh   952:        /* Now copy bits into descriptor */
                    953:        TXD->vlan_macip_lens = htole32(vlan_macip_lens);
                    954:        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
                    955:        TXD->seqnum_seed = htole32(0);
                    956:        TXD->mss_l4len_idx = htole32(0);
                    957:
                    958:        /* We've consumed the first desc, adjust counters */
                    959:        if (++ctxd == txr->num_desc)
                    960:                ctxd = 0;
                    961:        txr->next_avail_desc = ctxd;
                    962:        --txr->tx_avail;
                    963:
1.28      msaitoh   964:        return (0);
                    965: } /* ixgbe_tx_ctx_setup */
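/*
 * Illustrative example (editorial, not part of the original source): for an
 * untagged IPv4/TCP frame with a 14-byte ethernet header and a 20-byte IP
 * header, ixgbe_tx_ctx_setup() above builds a context descriptor with
 *
 *	vlan_macip_lens = (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20;
 *	type_tucmd_mlhl = IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
 *	    IXGBE_ADVTXD_TUCMD_IPV4 | IXGBE_ADVTXD_TUCMD_L4T_TCP;
 *
 * and, when the mbuf carries M_CSUM_TCPv4, sets IXGBE_TXD_POPTS_TXSM << 8 in
 * *olinfo_status so the hardware inserts the TCP checksum for the data
 * descriptors that follow.
 */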
1.1       msaitoh   966:
1.28      msaitoh   967: /************************************************************************
                    968:  * ixgbe_tso_setup
1.1       msaitoh   969:  *
1.28      msaitoh   970:  *   Setup work for hardware segmentation offload (TSO) on
                    971:  *   adapters using advanced tx descriptors
                    972:  ************************************************************************/
1.1       msaitoh   973: static int
1.28      msaitoh   974: ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
                    975:     u32 *olinfo_status)
1.1       msaitoh   976: {
                    977:        struct ixgbe_adv_tx_context_desc *TXD;
1.28      msaitoh   978:        struct ether_vlan_header         *eh;
1.1       msaitoh   979: #ifdef INET6
1.28      msaitoh   980:        struct ip6_hdr                   *ip6;
1.1       msaitoh   981: #endif
                    982: #ifdef INET
1.28      msaitoh   983:        struct ip                        *ip;
1.1       msaitoh   984: #endif
1.28      msaitoh   985:        struct tcphdr                    *th;
                    986:        int                              ctxd, ehdrlen, ip_hlen, tcp_hlen;
                    987:        u32                              vlan_macip_lens = 0;
                    988:        u32                              type_tucmd_mlhl = 0;
                    989:        u32                              mss_l4len_idx = 0, paylen;
                    990:        u16                              vtag = 0, eh_type;
1.1       msaitoh   991:
                    992:        /*
                    993:         * Determine where frame payload starts.
                    994:         * Jump over vlan headers if already present
                    995:         */
                    996:        eh = mtod(mp, struct ether_vlan_header *);
                    997:        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                    998:                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                    999:                eh_type = eh->evl_proto;
                   1000:        } else {
                   1001:                ehdrlen = ETHER_HDR_LEN;
                   1002:                eh_type = eh->evl_encap_proto;
                   1003:        }
                   1004:
                   1005:        switch (ntohs(eh_type)) {
                   1006: #ifdef INET
                   1007:        case ETHERTYPE_IP:
                   1008:                ip = (struct ip *)(mp->m_data + ehdrlen);
                   1009:                if (ip->ip_p != IPPROTO_TCP)
                   1010:                        return (ENXIO);
                   1011:                ip->ip_sum = 0;
                   1012:                ip_hlen = ip->ip_hl << 2;
                   1013:                th = (struct tcphdr *)((char *)ip + ip_hlen);
                   1014:                th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
                   1015:                    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
                   1016:                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                   1017:                /* Tell transmit desc to also do IPv4 checksum. */
                   1018:                *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
                   1019:                break;
                   1020: #endif
1.28      msaitoh  1021: #ifdef INET6
                   1022:        case ETHERTYPE_IPV6:
                   1023:                ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
                   1024:                /* XXX-BZ For now we do not pretend to support ext. hdrs. */
                   1025:                if (ip6->ip6_nxt != IPPROTO_TCP)
                   1026:                        return (ENXIO);
                   1027:                ip_hlen = sizeof(struct ip6_hdr);
                   1029:                th = (struct tcphdr *)((char *)ip6 + ip_hlen);
                   1030:                th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
                   1031:                    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
                   1032:                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
                   1033:                break;
                   1034: #endif
1.1       msaitoh  1035:        default:
                   1036:                panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
                   1037:                    __func__, ntohs(eh_type));
                   1038:                break;
                   1039:        }
                   1040:
                   1041:        ctxd = txr->next_avail_desc;
1.28      msaitoh  1042:        TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
1.1       msaitoh  1043:
                   1044:        tcp_hlen = th->th_off << 2;
                   1045:
                   1046:        /* This is used in the transmit desc in encap */
                   1047:        paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
                   1048:
                   1049:        /* VLAN MACLEN IPLEN */
1.29      knakahar 1050:        if (vlan_has_tag(mp)) {
                   1051:                vtag = htole16(vlan_get_tag(mp));
1.28      msaitoh  1052:                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
1.1       msaitoh  1053:        }
                   1054:
                   1055:        vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
                   1056:        vlan_macip_lens |= ip_hlen;
                   1057:        TXD->vlan_macip_lens = htole32(vlan_macip_lens);
                   1058:
                   1059:        /* ADV DTYPE TUCMD */
                   1060:        type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
                   1061:        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                   1062:        TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
                   1063:
                   1064:        /* MSS L4LEN IDX */
                   1065:        mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
                   1066:        mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
                   1067:        TXD->mss_l4len_idx = htole32(mss_l4len_idx);
                   1068:
                   1069:        TXD->seqnum_seed = htole32(0);
                   1070:
                   1071:        if (++ctxd == txr->num_desc)
                   1072:                ctxd = 0;
                   1073:
                   1074:        txr->tx_avail--;
                   1075:        txr->next_avail_desc = ctxd;
                   1076:        *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
                   1077:        *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
                   1078:        *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
                   1079:        ++txr->tso_tx.ev_count;
1.28      msaitoh  1080:
1.1       msaitoh  1081:        return (0);
1.28      msaitoh  1082: } /* ixgbe_tso_setup */
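/*
 * Illustrative example (editorial, not part of the original source): for a
 * TSO request whose mbuf chain holds 54 bytes of headers (ehdrlen 14,
 * ip_hlen 20, tcp_hlen 20) followed by 32000 bytes of TCP payload and
 * m_pkthdr.segsz = 1460, ixgbe_tso_setup() above reports
 *
 *	paylen        = mp->m_pkthdr.len - 54;		(= 32000)
 *	mss_l4len_idx = (1460 << IXGBE_ADVTXD_MSS_SHIFT) |
 *	    (20 << IXGBE_ADVTXD_L4LEN_SHIFT);
 *
 * and sets IXGBE_ADVTXD_DCMD_TSE so the hardware cuts the payload into
 * 1460-byte segments, completing the IP and TCP checksums per segment from
 * the pseudo-header sums seeded above.
 */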
1.1       msaitoh  1083:
1.3       msaitoh  1084:
1.28      msaitoh  1085: /************************************************************************
                   1086:  * ixgbe_txeof
1.1       msaitoh  1087:  *
1.28      msaitoh  1088:  *   Examine each tx_buffer in the used queue. If the hardware is done
                   1089:  *   processing the packet then free associated resources. The
                   1090:  *   tx_buffer is put back on the free queue.
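 *
 *   Returns true when the processing limit was exhausted (more work may
 *   remain), false otherwise.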
                   1091:  ************************************************************************/
1.32      msaitoh  1092: bool
1.1       msaitoh  1093: ixgbe_txeof(struct tx_ring *txr)
                   1094: {
                   1095:        struct adapter          *adapter = txr->adapter;
                   1096:        struct ifnet            *ifp = adapter->ifp;
1.28      msaitoh  1097:        struct ixgbe_tx_buf     *buf;
                   1098:        union ixgbe_adv_tx_desc *txd;
1.1       msaitoh  1099:        u32                     work, processed = 0;
1.7       msaitoh  1100:        u32                     limit = adapter->tx_process_limit;
1.1       msaitoh  1101:
                   1102:        KASSERT(mutex_owned(&txr->tx_mtx));
                   1103:
                   1104: #ifdef DEV_NETMAP
1.28      msaitoh  1105:        if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
                   1106:            (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
                   1107:                struct netmap_adapter *na = NA(adapter->ifp);
1.53      msaitoh  1108:                struct netmap_kring *kring = na->tx_rings[txr->me];
1.1       msaitoh  1109:                txd = txr->tx_base;
                   1110:                bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
                   1111:                    BUS_DMASYNC_POSTREAD);
                   1112:                /*
                   1113:                 * In netmap mode, all the work is done in the context
                   1114:                 * of the client thread. Interrupt handlers only wake up
                   1115:                 * clients, which may be sleeping on individual rings
                   1116:                 * or on a global resource for all rings.
                   1117:                 * To implement tx interrupt mitigation, we wake up the client
                   1118:                 * thread roughly every half ring, even if the NIC interrupts
                   1119:                 * more frequently. This is implemented as follows:
                   1120:                 * - ixgbe_txsync() sets kring->nr_kflags with the index of
                   1121:                 *   the slot that should wake up the thread (nkr_num_slots
                   1122:                 *   means the user thread should not be woken up);
                   1123:                 * - the driver ignores tx interrupts unless netmap_mitigate=0
                   1124:                 *   or the slot has the DD bit set.
                   1125:                 */
1.53      msaitoh  1126:                if (kring->nr_kflags < kring->nkr_num_slots &&
                   1127:                    txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD) {
1.1       msaitoh  1128:                        netmap_tx_irq(ifp, txr->me);
                   1129:                }
1.32      msaitoh  1130:                return false;
1.1       msaitoh  1131:        }
                   1132: #endif /* DEV_NETMAP */
                   1133:
                   1134:        if (txr->tx_avail == txr->num_desc) {
1.45      msaitoh  1135:                txr->busy = 0;
1.32      msaitoh  1136:                return false;
1.1       msaitoh  1137:        }
                   1138:
                   1139:        /* Get work starting point */
                   1140:        work = txr->next_to_clean;
                   1141:        buf = &txr->tx_buffers[work];
                   1142:        txd = &txr->tx_base[work];
                   1143:        work -= txr->num_desc; /* The distance to ring end */
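	/*
	 * Descriptive note (added editorially): 'work' is biased down by
	 * num_desc so that, after each increment below, it reaches zero
	 * exactly at the end of the ring; the wrap test is then simply
	 * "!work", and adding num_desc back afterwards recovers the real
	 * index for next_to_clean.
	 */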
1.28      msaitoh  1144:        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1.1       msaitoh  1145:            BUS_DMASYNC_POSTREAD);
1.8       msaitoh  1146:
1.1       msaitoh  1147:        do {
1.8       msaitoh  1148:                union ixgbe_adv_tx_desc *eop = buf->eop;
1.1       msaitoh  1149:                if (eop == NULL) /* No work */
                   1150:                        break;
                   1151:
                   1152:                if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
                   1153:                        break;  /* I/O not complete */
                   1154:
                   1155:                if (buf->m_head) {
1.28      msaitoh  1156:                        txr->bytes += buf->m_head->m_pkthdr.len;
                   1157:                        bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
1.1       msaitoh  1158:                            0, buf->m_head->m_pkthdr.len,
                   1159:                            BUS_DMASYNC_POSTWRITE);
1.28      msaitoh  1160:                        ixgbe_dmamap_unload(txr->txtag, buf->map);
1.1       msaitoh  1161:                        m_freem(buf->m_head);
                   1162:                        buf->m_head = NULL;
                   1163:                }
                   1164:                buf->eop = NULL;
1.47      msaitoh  1165:                txr->txr_no_space = false;
1.1       msaitoh  1166:                ++txr->tx_avail;
                   1167:
                   1168:                /* We clean the range if multi segment */
                   1169:                while (txd != eop) {
                   1170:                        ++txd;
                   1171:                        ++buf;
                   1172:                        ++work;
                   1173:                        /* wrap the ring? */
                   1174:                        if (__predict_false(!work)) {
                   1175:                                work -= txr->num_desc;
                   1176:                                buf = txr->tx_buffers;
                   1177:                                txd = txr->tx_base;
                   1178:                        }
                   1179:                        if (buf->m_head) {
                   1180:                                txr->bytes +=
                   1181:                                    buf->m_head->m_pkthdr.len;
                   1182:                                bus_dmamap_sync(txr->txtag->dt_dmat,
                   1183:                                    buf->map,
                   1184:                                    0, buf->m_head->m_pkthdr.len,
                   1185:                                    BUS_DMASYNC_POSTWRITE);
                   1186:                                ixgbe_dmamap_unload(txr->txtag,
                   1187:                                    buf->map);
                   1188:                                m_freem(buf->m_head);
                   1189:                                buf->m_head = NULL;
                   1190:                        }
                   1191:                        ++txr->tx_avail;
                   1192:                        buf->eop = NULL;
                   1193:
                   1194:                }
                   1195:                ++txr->packets;
                   1196:                ++processed;
                   1197:                ++ifp->if_opackets;
                   1198:
                   1199:                /* Try the next packet */
                   1200:                ++txd;
                   1201:                ++buf;
                   1202:                ++work;
                   1203:                /* reset with a wrap */
                   1204:                if (__predict_false(!work)) {
                   1205:                        work -= txr->num_desc;
                   1206:                        buf = txr->tx_buffers;
                   1207:                        txd = txr->tx_base;
                   1208:                }
                   1209:                prefetch(txd);
                   1210:        } while (__predict_true(--limit));
                   1211:
                   1212:        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
                   1213:            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   1214:
                   1215:        work += txr->num_desc;
                   1216:        txr->next_to_clean = work;
                   1217:
1.45      msaitoh  1218:        /*
                   1219:         * Queue Hang detection: we know there is
                   1220:         * work outstanding or the first return above
                   1221:         * would have been taken, so increment busy
                   1222:         * if nothing was cleaned; the local timer
                   1223:         * then checks it and marks the queue HUNG
                   1224:         * if it exceeds the maximum number of attempts.
                   1225:         */
                   1226:        if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
                   1227:                ++txr->busy;
                   1228:        /*
                   1229:         * If anything was cleaned, reset the state to 1;
                   1230:         * note that this also clears HUNG if it was set.
                   1231:         */
                   1232:        if (processed)
                   1233:                txr->busy = 1;
                   1234:
1.43      msaitoh  1235:        if (txr->tx_avail == txr->num_desc)
1.45      msaitoh  1236:                txr->busy = 0;
1.43      msaitoh  1237:
1.32      msaitoh  1238:        return ((limit > 0) ? false : true);
1.28      msaitoh  1239: } /* ixgbe_txeof */
1.1       msaitoh  1240:
1.28      msaitoh  1241: /************************************************************************
                   1242:  * ixgbe_rsc_count
                   1243:  *
                   1244:  *   Used to detect a descriptor that has been merged by Hardware RSC.
                   1245:  ************************************************************************/
1.1       msaitoh  1246: static inline u32
                   1247: ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
                   1248: {
                   1249:        return (le32toh(rx->wb.lower.lo_dword.data) &
                   1250:            IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1.28      msaitoh  1251: } /* ixgbe_rsc_count */
1.1       msaitoh  1252:
1.28      msaitoh  1253: /************************************************************************
                   1254:  * ixgbe_setup_hw_rsc
1.1       msaitoh  1255:  *
1.28      msaitoh  1256:  *   Initialize the Hardware RSC (LRO) feature on 82599
                   1257:  *   for an RX ring; it is toggled by the LRO capability
                   1258:  *   even though it is transparent to the stack.
                   1259:  *
                   1260:  *   NOTE: Since this HW feature only works with IPv4 and
                   1261:  *         testing has shown soft LRO to be as effective,
                   1262:  *         this feature will be disabled by default.
                   1263:  ************************************************************************/
1.1       msaitoh  1264: static void
                   1265: ixgbe_setup_hw_rsc(struct rx_ring *rxr)
                   1266: {
1.28      msaitoh  1267:        struct  adapter  *adapter = rxr->adapter;
                   1268:        struct  ixgbe_hw *hw = &adapter->hw;
                   1269:        u32              rscctrl, rdrxctl;
1.1       msaitoh  1270:
                   1271:        /* If turning LRO/RSC off we need to disable it */
                   1272:        if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
                   1273:                rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
                   1274:                rscctrl &= ~IXGBE_RSCCTL_RSCEN;
                                        /*
                                         * Write the cleared RSCEN bit back;
                                         * without this the disable is a no-op.
                                         */
                                        IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
                   1275:                return;
                   1276:        }
                   1277:
                   1278:        rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
                   1279:        rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1.28      msaitoh  1280: #ifdef DEV_NETMAP
                   1281:        /* Always strip CRC unless Netmap disabled it */
                   1282:        if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
                   1283:            !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
                   1284:            ix_crcstrip)
1.1       msaitoh  1285: #endif /* DEV_NETMAP */
1.28      msaitoh  1286:                rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1.1       msaitoh  1287:        rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
                   1288:        IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
                   1289:
                   1290:        rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
                   1291:        rscctrl |= IXGBE_RSCCTL_RSCEN;
                   1292:        /*
1.28      msaitoh  1293:         * Limit the total number of descriptors that
                   1294:         * can be combined, so it does not exceed 64K
                   1295:         */
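	/*
	 * Worked numbers (added editorially): MCLBYTES (2 KB) clusters allow
	 * up to 16 merged descriptors (32 KB), page-sized clusters 8 (32 KB
	 * with 4 KB pages) and 9 KB clusters only 4 (36 KB), so every choice
	 * below stays under the 64 KB limit.
	 */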
1.1       msaitoh  1296:        if (rxr->mbuf_sz == MCLBYTES)
                   1297:                rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
                   1298:        else if (rxr->mbuf_sz == MJUMPAGESIZE)
                   1299:                rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
                   1300:        else if (rxr->mbuf_sz == MJUM9BYTES)
                   1301:                rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
                   1302:        else  /* Using 16K cluster */
                   1303:                rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
                   1304:
                   1305:        IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
                   1306:
                   1307:        /* Enable TCP header recognition */
                   1308:        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
1.28      msaitoh  1309:            (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
1.1       msaitoh  1310:
                   1311:        /* Disable RSC for ACK packets */
                   1312:        IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
                   1313:            (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
                   1314:
                   1315:        rxr->hw_rsc = TRUE;
1.28      msaitoh  1316: } /* ixgbe_setup_hw_rsc */
1.8       msaitoh  1317:
1.28      msaitoh  1318: /************************************************************************
                   1319:  * ixgbe_refresh_mbufs
1.1       msaitoh  1320:  *
1.28      msaitoh  1321:  *   Refresh mbuf buffers for RX descriptor rings
                   1322:  *    - now keeps its own state so discards due to resource
                   1323:  *      exhaustion are unnecessary; if an mbuf cannot be obtained
                   1324:  *      it just returns, keeping its placeholder, so it can simply
                   1325:  *      be called again later to retry.
                   1326:  ************************************************************************/
1.1       msaitoh  1327: static void
                   1328: ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
                   1329: {
1.28      msaitoh  1330:        struct adapter      *adapter = rxr->adapter;
                   1331:        struct ixgbe_rx_buf *rxbuf;
                   1332:        struct mbuf         *mp;
                   1333:        int                 i, j, error;
                   1334:        bool                refreshed = false;
1.1       msaitoh  1335:
                   1336:        i = j = rxr->next_to_refresh;
                   1337:        /* Control the loop with one beyond */
                   1338:        if (++j == rxr->num_desc)
                   1339:                j = 0;
                   1340:
                   1341:        while (j != limit) {
                   1342:                rxbuf = &rxr->rx_buffers[i];
                   1343:                if (rxbuf->buf == NULL) {
1.49      msaitoh  1344:                        mp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
1.1       msaitoh  1345:                            MT_DATA, M_PKTHDR, rxr->mbuf_sz);
                   1346:                        if (mp == NULL) {
                   1347:                                rxr->no_jmbuf.ev_count++;
                   1348:                                goto update;
                   1349:                        }
                   1350:                        if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
                   1351:                                m_adj(mp, ETHER_ALIGN);
                   1352:                } else
                   1353:                        mp = rxbuf->buf;
                   1354:
                   1355:                mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
                   1356:
                   1357:                /* If we're dealing with an mbuf that was copied rather
                   1358:                 * than replaced, there's no need to go through busdma.
                   1359:                 */
                   1360:                if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
                   1361:                        /* Get the memory mapping */
1.4       msaitoh  1362:                        ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1.1       msaitoh  1363:                        error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
                   1364:                            rxbuf->pmap, mp, BUS_DMA_NOWAIT);
                   1365:                        if (error != 0) {
1.55    ! msaitoh  1366:                                device_printf(adapter->dev, "Refresh mbufs: "
        !          1367:                                    "payload dmamap load failure - %d\n",
        !          1368:                                    error);
1.1       msaitoh  1369:                                m_free(mp);
                   1370:                                rxbuf->buf = NULL;
                   1371:                                goto update;
                   1372:                        }
                   1373:                        rxbuf->buf = mp;
                   1374:                        bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
                   1375:                            0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
                   1376:                        rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
                   1377:                            htole64(rxbuf->pmap->dm_segs[0].ds_addr);
                   1378:                } else {
                   1379:                        rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
                   1380:                        rxbuf->flags &= ~IXGBE_RX_COPY;
                   1381:                }
                   1382:
                   1383:                refreshed = true;
                   1384:                /* Next is precalculated */
                   1385:                i = j;
                   1386:                rxr->next_to_refresh = i;
                   1387:                if (++j == rxr->num_desc)
                   1388:                        j = 0;
                   1389:        }
1.28      msaitoh  1390:
1.1       msaitoh  1391: update:
                   1392:        if (refreshed) /* Update hardware tail index */
1.28      msaitoh  1393:                IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
                   1394:
1.1       msaitoh  1395:        return;
1.28      msaitoh  1396: } /* ixgbe_refresh_mbufs */
1.1       msaitoh  1397:
1.28      msaitoh  1398: /************************************************************************
                   1399:  * ixgbe_allocate_receive_buffers
1.1       msaitoh  1400:  *
1.28      msaitoh  1401:  *   Allocate memory for rx_buffer structures. Since we use one
                   1402:  *   rx_buffer per received packet, the maximum number of rx_buffers
                   1403:  *   that we'll need is equal to the number of receive descriptors
                   1404:  *   that we've allocated.
                   1405:  ************************************************************************/
                   1406: static int
1.1       msaitoh  1407: ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
                   1408: {
1.53      msaitoh  1409:        struct adapter      *adapter = rxr->adapter;
1.28      msaitoh  1410:        device_t            dev = adapter->dev;
                   1411:        struct ixgbe_rx_buf *rxbuf;
                   1412:        int                 bsize, error;
1.1       msaitoh  1413:
                   1414:        bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1.28      msaitoh  1415:        rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
                   1416:            M_NOWAIT | M_ZERO);
                   1417:        if (rxr->rx_buffers == NULL) {
1.1       msaitoh  1418:                aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
                   1419:                error = ENOMEM;
                   1420:                goto fail;
                   1421:        }
                   1422:
1.28      msaitoh  1423:        error = ixgbe_dma_tag_create(
                   1424:                 /*      parent */ adapter->osdep.dmat,
                   1425:                 /*   alignment */ 1,
                   1426:                 /*      bounds */ 0,
                   1427:                 /*     maxsize */ MJUM16BYTES,
                   1428:                 /*   nsegments */ 1,
                   1429:                 /*  maxsegsize */ MJUM16BYTES,
                   1430:                 /*       flags */ 0,
                   1431:                                   &rxr->ptag);
                   1432:        if (error != 0) {
1.1       msaitoh  1433:                aprint_error_dev(dev, "Unable to create RX DMA tag\n");
                   1434:                goto fail;
                   1435:        }
                   1436:
1.5       msaitoh  1437:        for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
1.1       msaitoh  1438:                rxbuf = &rxr->rx_buffers[i];
1.4       msaitoh  1439:                error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1.1       msaitoh  1440:                if (error) {
                   1441:                        aprint_error_dev(dev, "Unable to create RX dma map\n");
                   1442:                        goto fail;
                   1443:                }
                   1444:        }
                   1445:
                   1446:        return (0);
                   1447:
                   1448: fail:
                   1449:        /* Frees all, but can handle partial completion */
                   1450:        ixgbe_free_receive_structures(adapter);
1.28      msaitoh  1451:
1.1       msaitoh  1452:        return (error);
1.28      msaitoh  1453: } /* ixgbe_allocate_receive_buffers */
1.1       msaitoh  1454:
1.28      msaitoh  1455: /************************************************************************
1.30      msaitoh  1456:  * ixgbe_free_receive_ring
1.28      msaitoh  1457:  ************************************************************************/
                   1458: static void
1.1       msaitoh  1459: ixgbe_free_receive_ring(struct rx_ring *rxr)
1.27      msaitoh  1460: {
1.5       msaitoh  1461:        for (int i = 0; i < rxr->num_desc; i++) {
1.27      msaitoh  1462:                ixgbe_rx_discard(rxr, i);
1.1       msaitoh  1463:        }
1.28      msaitoh  1464: } /* ixgbe_free_receive_ring */
1.1       msaitoh  1465:
1.28      msaitoh  1466: /************************************************************************
                   1467:  * ixgbe_setup_receive_ring
1.1       msaitoh  1468:  *
1.28      msaitoh  1469:  *   Initialize a receive ring and its buffers.
                   1470:  ************************************************************************/
1.1       msaitoh  1471: static int
                   1472: ixgbe_setup_receive_ring(struct rx_ring *rxr)
                   1473: {
1.28      msaitoh  1474:        struct adapter        *adapter;
                   1475:        struct ixgbe_rx_buf   *rxbuf;
1.1       msaitoh  1476: #ifdef LRO
1.28      msaitoh  1477:        struct ifnet          *ifp;
                   1478:        struct lro_ctrl       *lro = &rxr->lro;
1.1       msaitoh  1479: #endif /* LRO */
                   1480: #ifdef DEV_NETMAP
                   1481:        struct netmap_adapter *na = NA(rxr->adapter->ifp);
1.28      msaitoh  1482:        struct netmap_slot    *slot;
1.1       msaitoh  1483: #endif /* DEV_NETMAP */
1.28      msaitoh  1484:        int                   rsize, error = 0;
1.1       msaitoh  1485:
                   1486:        adapter = rxr->adapter;
                   1487: #ifdef LRO
                   1488:        ifp = adapter->ifp;
                   1489: #endif /* LRO */
                   1490:
                   1491:        /* Clear the ring contents */
                   1492:        IXGBE_RX_LOCK(rxr);
1.28      msaitoh  1493:
1.1       msaitoh  1494: #ifdef DEV_NETMAP
1.28      msaitoh  1495:        if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
                   1496:                slot = netmap_reset(na, NR_RX, rxr->me, 0);
1.1       msaitoh  1497: #endif /* DEV_NETMAP */
1.28      msaitoh  1498:
1.1       msaitoh  1499:        rsize = roundup2(adapter->num_rx_desc *
                   1500:            sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
                   1501:        bzero((void *)rxr->rx_base, rsize);
                   1502:        /* Cache the size */
                   1503:        rxr->mbuf_sz = adapter->rx_mbuf_sz;
                   1504:
                   1505:        /* Free current RX buffer structs and their mbufs */
                   1506:        ixgbe_free_receive_ring(rxr);
                   1507:
1.49      msaitoh  1508:        IXGBE_RX_UNLOCK(rxr);
                   1509:        /*
                   1510:         * Now reinitialize our supply of jumbo mbufs.  The number
                   1511:         * or size of jumbo mbufs may have changed.
                   1512:         * Assume every ring's rxr->ptag is the same.
                   1513:         */
                   1514:        ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat, rxr,
                   1515:            (2 * adapter->num_rx_desc), adapter->rx_mbuf_sz);
                   1516:
                   1517:        IXGBE_RX_LOCK(rxr);
                   1518:
1.1       msaitoh  1519:        /* Now replenish the mbufs */
                   1520:        for (int j = 0; j != rxr->num_desc; ++j) {
1.28      msaitoh  1521:                struct mbuf *mp;
1.1       msaitoh  1522:
                   1523:                rxbuf = &rxr->rx_buffers[j];
1.28      msaitoh  1524:
1.1       msaitoh  1525: #ifdef DEV_NETMAP
                   1526:                /*
                   1527:                 * In netmap mode, fill the map and set the buffer
                   1528:                 * address in the NIC ring, considering the offset
                   1529:                 * between the netmap and NIC rings (see comment in
                   1530:                 * ixgbe_setup_transmit_ring() ). No need to allocate
                   1531:                 * an mbuf, so end the block with a continue;
                   1532:                 */
1.28      msaitoh  1533:                if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1.53      msaitoh  1534:                        int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j);
1.1       msaitoh  1535:                        uint64_t paddr;
                   1536:                        void *addr;
                   1537:
                   1538:                        addr = PNMB(na, slot + sj, &paddr);
                   1539:                        netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
                   1540:                        /* Update descriptor and the cached value */
                   1541:                        rxr->rx_base[j].read.pkt_addr = htole64(paddr);
                   1542:                        rxbuf->addr = htole64(paddr);
                   1543:                        continue;
                   1544:                }
                   1545: #endif /* DEV_NETMAP */
1.28      msaitoh  1546:
                   1547:                rxbuf->flags = 0;
1.49      msaitoh  1548:                rxbuf->buf = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
1.1       msaitoh  1549:                    MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
                   1550:                if (rxbuf->buf == NULL) {
                   1551:                        error = ENOBUFS;
1.28      msaitoh  1552:                        goto fail;
1.1       msaitoh  1553:                }
                   1554:                mp = rxbuf->buf;
                   1555:                mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
                   1556:                /* Get the memory mapping */
1.28      msaitoh  1557:                error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
                   1558:                    mp, BUS_DMA_NOWAIT);
1.1       msaitoh  1559:                if (error != 0)
                   1560:                         goto fail;
                   1561:                bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
                   1562:                    0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
                   1563:                /* Update the descriptor and the cached value */
                   1564:                rxr->rx_base[j].read.pkt_addr =
                   1565:                    htole64(rxbuf->pmap->dm_segs[0].ds_addr);
                   1566:                rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
                   1567:        }
                   1568:
                   1569:
                   1570:        /* Setup our descriptor indices */
                   1571:        rxr->next_to_check = 0;
                   1572:        rxr->next_to_refresh = 0;
                   1573:        rxr->lro_enabled = FALSE;
                   1574:        rxr->rx_copies.ev_count = 0;
1.13      msaitoh  1575: #if 0 /* NetBSD */
1.1       msaitoh  1576:        rxr->rx_bytes.ev_count = 0;
1.13      msaitoh  1577: #if 1  /* Fix inconsistency */
                   1578:        rxr->rx_packets.ev_count = 0;
                   1579: #endif
                   1580: #endif
1.1       msaitoh  1581:        rxr->vtag_strip = FALSE;
                   1582:
                   1583:        ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
                   1584:            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   1585:
                   1586:        /*
1.28      msaitoh  1587:         * Now set up the LRO interface
                   1588:         */
1.1       msaitoh  1589:        if (ixgbe_rsc_enable)
                   1590:                ixgbe_setup_hw_rsc(rxr);
                   1591: #ifdef LRO
                   1592:        else if (ifp->if_capenable & IFCAP_LRO) {
                   1593:                device_t dev = adapter->dev;
                   1594:                int err = tcp_lro_init(lro);
                   1595:                if (err) {
                   1596:                        device_printf(dev, "LRO Initialization failed!\n");
                   1597:                        goto fail;
                   1598:                }
                   1599:                INIT_DEBUGOUT("RX Soft LRO Initialized\n");
                   1600:                rxr->lro_enabled = TRUE;
                   1601:                lro->ifp = adapter->ifp;
                   1602:        }
                   1603: #endif /* LRO */
                   1604:
                   1605:        IXGBE_RX_UNLOCK(rxr);
1.28      msaitoh  1606:
1.1       msaitoh  1607:        return (0);
                   1608:
                   1609: fail:
                   1610:        ixgbe_free_receive_ring(rxr);
                   1611:        IXGBE_RX_UNLOCK(rxr);
1.28      msaitoh  1612:
1.1       msaitoh  1613:        return (error);
1.28      msaitoh  1614: } /* ixgbe_setup_receive_ring */
1.1       msaitoh  1615:
1.28      msaitoh  1616: /************************************************************************
                   1617:  * ixgbe_setup_receive_structures - Initialize all receive rings.
                   1618:  ************************************************************************/
1.1       msaitoh  1619: int
                   1620: ixgbe_setup_receive_structures(struct adapter *adapter)
                   1621: {
                   1622:        struct rx_ring *rxr = adapter->rx_rings;
1.28      msaitoh  1623:        int            j;
1.1       msaitoh  1624:
                   1625:        for (j = 0; j < adapter->num_queues; j++, rxr++)
                   1626:                if (ixgbe_setup_receive_ring(rxr))
                   1627:                        goto fail;
                   1628:
                   1629:        return (0);
                   1630: fail:
                   1631:        /*
                   1632:         * Free RX buffers allocated so far; we will only handle
                   1633:         * the rings that completed, since the failing case will have
                   1634:         * cleaned up for itself. 'j' failed, so it's the terminus.
                   1635:         */
                   1636:        for (int i = 0; i < j; ++i) {
                   1637:                rxr = &adapter->rx_rings[i];
1.27      msaitoh  1638:                IXGBE_RX_LOCK(rxr);
1.1       msaitoh  1639:                ixgbe_free_receive_ring(rxr);
1.27      msaitoh  1640:                IXGBE_RX_UNLOCK(rxr);
1.1       msaitoh  1641:        }
                   1642:
                   1643:        return (ENOBUFS);
1.28      msaitoh  1644: } /* ixgbe_setup_receive_structures */
1.1       msaitoh  1645:
1.3       msaitoh  1646:
1.28      msaitoh  1647: /************************************************************************
                   1648:  * ixgbe_free_receive_structures - Free all receive rings.
                   1649:  ************************************************************************/
1.1       msaitoh  1650: void
                   1651: ixgbe_free_receive_structures(struct adapter *adapter)
                   1652: {
                   1653:        struct rx_ring *rxr = adapter->rx_rings;
                   1654:
                   1655:        INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
                   1656:
                   1657:        for (int i = 0; i < adapter->num_queues; i++, rxr++) {
                   1658:                ixgbe_free_receive_buffers(rxr);
                   1659: #ifdef LRO
                   1660:                /* Free LRO memory */
1.28      msaitoh  1661:                tcp_lro_free(&rxr->lro);
1.1       msaitoh  1662: #endif /* LRO */
                   1663:                /* Free the ring memory as well */
                   1664:                ixgbe_dma_free(adapter, &rxr->rxdma);
                   1665:                IXGBE_RX_LOCK_DESTROY(rxr);
                   1666:        }
                   1667:
                   1668:        free(adapter->rx_rings, M_DEVBUF);
1.28      msaitoh  1669: } /* ixgbe_free_receive_structures */
1.1       msaitoh  1670:
                   1671:
1.28      msaitoh  1672: /************************************************************************
                   1673:  * ixgbe_free_receive_buffers - Free receive ring data structures
                   1674:  ************************************************************************/
1.1       msaitoh  1675: static void
                   1676: ixgbe_free_receive_buffers(struct rx_ring *rxr)
                   1677: {
1.28      msaitoh  1678:        struct adapter      *adapter = rxr->adapter;
                   1679:        struct ixgbe_rx_buf *rxbuf;
1.1       msaitoh  1680:
                   1681:        INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
                   1682:
                   1683:        /* Cleanup any existing buffers */
                   1684:        if (rxr->rx_buffers != NULL) {
                   1685:                for (int i = 0; i < adapter->num_rx_desc; i++) {
                   1686:                        rxbuf = &rxr->rx_buffers[i];
1.27      msaitoh  1687:                        ixgbe_rx_discard(rxr, i);
1.1       msaitoh  1688:                        if (rxbuf->pmap != NULL) {
                   1689:                                ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
                   1690:                                rxbuf->pmap = NULL;
                   1691:                        }
                   1692:                }
                   1693:                if (rxr->rx_buffers != NULL) {
                   1694:                        free(rxr->rx_buffers, M_DEVBUF);
                   1695:                        rxr->rx_buffers = NULL;
                   1696:                }
                   1697:        }
                   1698:
                   1699:        if (rxr->ptag != NULL) {
                   1700:                ixgbe_dma_tag_destroy(rxr->ptag);
                   1701:                rxr->ptag = NULL;
                   1702:        }
                   1703:
                   1704:        return;
1.28      msaitoh  1705: } /* ixgbe_free_receive_buffers */
1.1       msaitoh  1706:
1.28      msaitoh  1707: /************************************************************************
                   1708:  * ixgbe_rx_input
                   1709:  ************************************************************************/
1.1       msaitoh  1710: static __inline void
1.28      msaitoh  1711: ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
                   1712:     u32 ptype)
1.1       msaitoh  1713: {
1.20      msaitoh  1714:        struct adapter  *adapter = ifp->if_softc;
1.1       msaitoh  1715:
                   1716: #ifdef LRO
                   1717:        struct ethercom *ec = &adapter->osdep.ec;
                   1718:
1.28      msaitoh  1719:        /*
                   1720:         * At the moment LRO is only for IP/TCP packets whose TCP checksum
                   1721:         * has been verified by hardware and which carry no VLAN tag in the
                   1722:         * ethernet header.  For IPv6 we do not yet support extension headers.
                   1723:         */
1.1       msaitoh  1724:         if (rxr->lro_enabled &&
                   1725:             (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
                   1726:             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
                   1727:             ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
                   1728:             (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
                   1729:             (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
                   1730:             (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
                   1731:             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
                   1732:             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
                   1733:                 /*
                   1734:                  * Send to the stack if:
                   1735:                  *  - LRO not enabled, or
                   1736:                  *  - no LRO resources, or
                   1737:                  *  - lro enqueue fails
                   1738:                  */
                   1739:                 if (rxr->lro.lro_cnt != 0)
                   1740:                         if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
                   1741:                                 return;
                   1742:         }
                   1743: #endif /* LRO */
                   1744:
1.20      msaitoh  1745:        if_percpuq_enqueue(adapter->ipq, m);
1.28      msaitoh  1746: } /* ixgbe_rx_input */
1.1       msaitoh  1747:
1.28      msaitoh  1748: /************************************************************************
                   1749:  * ixgbe_rx_discard
                   1750:  ************************************************************************/
1.1       msaitoh  1751: static __inline void
                   1752: ixgbe_rx_discard(struct rx_ring *rxr, int i)
                   1753: {
1.28      msaitoh  1754:        struct ixgbe_rx_buf *rbuf;
1.1       msaitoh  1755:
                   1756:        rbuf = &rxr->rx_buffers[i];
                   1757:
                   1758:        /*
1.28      msaitoh  1759:         * With advanced descriptors the writeback
                   1760:         * clobbers the buffer addresses, so it's easier
                   1761:         * to just free the existing mbufs and take
                   1762:         * the normal refresh path to get new buffers
                   1763:         * and mappings.
                   1764:         */
1.1       msaitoh  1765:
1.26      msaitoh  1766:        if (rbuf->fmp != NULL) {/* Partial chain ? */
1.27      msaitoh  1767:                bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
                   1768:                    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1.1       msaitoh  1769:                m_freem(rbuf->fmp);
                   1770:                rbuf->fmp = NULL;
                   1771:                rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
                   1772:        } else if (rbuf->buf) {
1.27      msaitoh  1773:                bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
                   1774:                    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1.1       msaitoh  1775:                m_free(rbuf->buf);
                   1776:                rbuf->buf = NULL;
                   1777:        }
1.4       msaitoh  1778:        ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1.1       msaitoh  1779:
                   1780:        rbuf->flags = 0;
                   1781:
                   1782:        return;
1.28      msaitoh  1783: } /* ixgbe_rx_discard */
1.1       msaitoh  1784:
                   1785:
1.28      msaitoh  1786: /************************************************************************
                   1787:  * ixgbe_rxeof
1.1       msaitoh  1788:  *
1.28      msaitoh  1789:  *   Executes in interrupt context. It replenishes the
                   1790:  *   mbufs in the descriptor ring and sends data which has
                   1791:  *   been DMA'ed into host memory to the upper layer.
1.1       msaitoh  1792:  *
1.28      msaitoh  1793:  *   Return TRUE for more work, FALSE for all clean.
                   1794:  ************************************************************************/
1.1       msaitoh  1795: bool
                   1796: ixgbe_rxeof(struct ix_queue *que)
                   1797: {
                   1798:        struct adapter          *adapter = que->adapter;
                   1799:        struct rx_ring          *rxr = que->rxr;
                   1800:        struct ifnet            *ifp = adapter->ifp;
                   1801: #ifdef LRO
                   1802:        struct lro_ctrl         *lro = &rxr->lro;
                   1803: #endif /* LRO */
1.28      msaitoh  1804:        union ixgbe_adv_rx_desc *cur;
                   1805:        struct ixgbe_rx_buf     *rbuf, *nbuf;
1.1       msaitoh  1806:        int                     i, nextp, processed = 0;
                   1807:        u32                     staterr = 0;
1.7       msaitoh  1808:        u32                     count = adapter->rx_process_limit;
1.1       msaitoh  1809: #ifdef RSS
                   1810:        u16                     pkt_info;
                   1811: #endif
                   1812:
                   1813:        IXGBE_RX_LOCK(rxr);
                   1814:
                   1815: #ifdef DEV_NETMAP
1.28      msaitoh  1816:        if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
                   1817:                /* Same as the txeof routine: wakeup clients on intr. */
                   1818:                if (netmap_rx_irq(ifp, rxr->me, &processed)) {
                   1819:                        IXGBE_RX_UNLOCK(rxr);
                   1820:                        return (FALSE);
                   1821:                }
1.1       msaitoh  1822:        }
                   1823: #endif /* DEV_NETMAP */
                   1824:
                   1825:        for (i = rxr->next_to_check; count != 0;) {
1.28      msaitoh  1826:                struct mbuf *sendmp, *mp;
                   1827:                u32         rsc, ptype;
                   1828:                u16         len;
                   1829:                u16         vtag = 0;
                   1830:                bool        eop;
1.53      msaitoh  1831:
1.1       msaitoh  1832:                /* Sync the ring. */
                   1833:                ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
                   1834:                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                   1835:
                   1836:                cur = &rxr->rx_base[i];
                   1837:                staterr = le32toh(cur->wb.upper.status_error);
                   1838: #ifdef RSS
                   1839:                pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
                   1840: #endif
                   1841:
                   1842:                if ((staterr & IXGBE_RXD_STAT_DD) == 0)
                   1843:                        break;
                   1844:
                   1845:                count--;
                   1846:                sendmp = NULL;
                   1847:                nbuf = NULL;
                   1848:                rsc = 0;
                   1849:                cur->wb.upper.status_error = 0;
                   1850:                rbuf = &rxr->rx_buffers[i];
                   1851:                mp = rbuf->buf;
                   1852:
                   1853:                len = le16toh(cur->wb.upper.length);
                   1854:                ptype = le32toh(cur->wb.lower.lo_dword.data) &
                   1855:                    IXGBE_RXDADV_PKTTYPE_MASK;
                   1856:                eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
                   1857:
                   1858:                /* Make sure bad packets are discarded */
                   1859:                if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1.3       msaitoh  1860: #if __FreeBSD_version >= 1100036
1.28      msaitoh  1861:                        if (adapter->feat_en & IXGBE_FEATURE_VF)
1.4       msaitoh  1862:                                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1.3       msaitoh  1863: #endif
1.1       msaitoh  1864:                        rxr->rx_discarded.ev_count++;
                   1865:                        ixgbe_rx_discard(rxr, i);
                   1866:                        goto next_desc;
                   1867:                }
                   1868:
1.27      msaitoh  1869:                bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
                   1870:                    rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
                   1871:
1.1       msaitoh  1872:                /*
1.28      msaitoh  1873:                 * On the 82599, which supports a hardware
                   1874:                 * LRO (called HW RSC), packets need not be
                   1875:                 * fragmented across sequential descriptors;
                   1876:                 * instead, the next descriptor of the frame
                   1877:                 * is indicated in bits of the descriptor.
                   1878:                 * This also means that we might process
                   1879:                 * more than one packet at a time, something
                   1880:                 * that had never been true before; it
                   1881:                 * required eliminating global chain pointers
                   1882:                 * in favor of what we are doing here.  -jfv
                   1883:                 */
1.1       msaitoh  1884:                if (!eop) {
                   1885:                        /*
1.28      msaitoh  1886:                         * Figure out the next descriptor
                   1887:                         * of this frame.
                   1888:                         */
1.1       msaitoh  1889:                        if (rxr->hw_rsc == TRUE) {
                   1890:                                rsc = ixgbe_rsc_count(cur);
                   1891:                                rxr->rsc_num += (rsc - 1);
                   1892:                        }
                   1893:                        if (rsc) { /* Get hardware index */
1.28      msaitoh  1894:                                nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1.1       msaitoh  1895:                                    IXGBE_RXDADV_NEXTP_SHIFT);
                   1896:                        } else { /* Just sequential */
                   1897:                                nextp = i + 1;
                   1898:                                if (nextp == adapter->num_rx_desc)
                   1899:                                        nextp = 0;
                   1900:                        }
                   1901:                        nbuf = &rxr->rx_buffers[nextp];
                   1902:                        prefetch(nbuf);
                   1903:                }
                   1904:                /*
1.28      msaitoh  1905:                 * Rather than using the fmp/lmp global pointers
                   1906:                 * we now keep the head of a packet chain in the
                   1907:                 * buffer struct and pass this along from one
                   1908:                 * descriptor to the next, until we get EOP.
                   1909:                 */
1.1       msaitoh  1910:                mp->m_len = len;
                   1911:                /*
1.28      msaitoh  1912:                 * See if there is a stored head from a
                   1913:                 * previous descriptor of this frame.
                   1914:                 */
1.1       msaitoh  1915:                sendmp = rbuf->fmp;
                   1916:                if (sendmp != NULL) {  /* secondary frag */
                   1917:                        rbuf->buf = rbuf->fmp = NULL;
                   1918:                        mp->m_flags &= ~M_PKTHDR;
                   1919:                        sendmp->m_pkthdr.len += mp->m_len;
                   1920:                } else {
                   1921:                        /*
                   1922:                         * Optimize.  This might be a small packet,
                   1923:                         * maybe just a TCP ACK.  Do a fast copy that
                   1924:                         * is cache aligned into a new mbuf, and
                   1925:                         * leave the old mbuf+cluster for re-use.
                   1926:                         */
                   1927:                        if (eop && len <= IXGBE_RX_COPY_LEN) {
                   1928:                                sendmp = m_gethdr(M_NOWAIT, MT_DATA);
                   1929:                                if (sendmp != NULL) {
1.28      msaitoh  1930:                                        sendmp->m_data += IXGBE_RX_COPY_ALIGN;
                   1931:                                        ixgbe_bcopy(mp->m_data, sendmp->m_data,
                   1932:                                            len);
1.1       msaitoh  1933:                                        sendmp->m_len = len;
                   1934:                                        rxr->rx_copies.ev_count++;
                   1935:                                        rbuf->flags |= IXGBE_RX_COPY;
                   1936:                                }
                   1937:                        }
                   1938:                        if (sendmp == NULL) {
                   1939:                                rbuf->buf = rbuf->fmp = NULL;
                   1940:                                sendmp = mp;
                   1941:                        }
                   1942:
                   1943:                        /* first desc of a non-ps chain */
                   1944:                        sendmp->m_flags |= M_PKTHDR;
                   1945:                        sendmp->m_pkthdr.len = mp->m_len;
                   1946:                }
                   1947:                ++processed;
                   1948:
                   1949:                /* Pass the head pointer on */
                   1950:                if (eop == 0) {
                   1951:                        nbuf->fmp = sendmp;
                   1952:                        sendmp = NULL;
                   1953:                        mp->m_next = nbuf->buf;
                   1954:                } else { /* Sending this frame */
                   1955:                        m_set_rcvif(sendmp, ifp);
1.31      msaitoh  1956:                        ++rxr->packets;
1.1       msaitoh  1957:                        rxr->rx_packets.ev_count++;
                   1958:                        /* capture data for AIM */
                   1959:                        rxr->bytes += sendmp->m_pkthdr.len;
                   1960:                        rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
                   1961:                        /* Process vlan info */
1.28      msaitoh  1962:                        if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
1.1       msaitoh  1963:                                vtag = le16toh(cur->wb.upper.vlan);
                   1964:                        if (vtag) {
1.29      knakahar 1965:                                vlan_set_tag(sendmp, vtag);
1.1       msaitoh  1966:                        }
                   1967:                        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
                   1968:                                ixgbe_rx_checksum(staterr, sendmp, ptype,
1.3       msaitoh  1969:                                   &adapter->stats.pf);
1.1       msaitoh  1970:                        }
1.8       msaitoh  1971:
1.6       msaitoh  1972: #if 0 /* FreeBSD */
1.28      msaitoh  1973:                        /*
                   1974:                         * In case of multiqueue, we have RXCSUM.PCSD bit set
                   1975:                         * and never cleared. This means we have RSS hash
                   1976:                         * available to be used.
                   1977:                         */
                   1978:                        if (adapter->num_queues > 1) {
                   1979:                                sendmp->m_pkthdr.flowid =
                   1980:                                    le32toh(cur->wb.lower.hi_dword.rss);
1.44      msaitoh  1981:                                switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
                   1982:                                case IXGBE_RXDADV_RSSTYPE_IPV4:
1.28      msaitoh  1983:                                        M_HASHTYPE_SET(sendmp,
                   1984:                                            M_HASHTYPE_RSS_IPV4);
                   1985:                                        break;
1.44      msaitoh  1986:                                case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1.28      msaitoh  1987:                                        M_HASHTYPE_SET(sendmp,
                   1988:                                            M_HASHTYPE_RSS_TCP_IPV4);
                   1989:                                        break;
1.44      msaitoh  1990:                                case IXGBE_RXDADV_RSSTYPE_IPV6:
1.28      msaitoh  1991:                                        M_HASHTYPE_SET(sendmp,
                   1992:                                            M_HASHTYPE_RSS_IPV6);
                   1993:                                        break;
1.44      msaitoh  1994:                                case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
1.28      msaitoh  1995:                                        M_HASHTYPE_SET(sendmp,
                   1996:                                            M_HASHTYPE_RSS_TCP_IPV6);
                   1997:                                        break;
1.44      msaitoh  1998:                                case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
1.28      msaitoh  1999:                                        M_HASHTYPE_SET(sendmp,
                   2000:                                            M_HASHTYPE_RSS_IPV6_EX);
                   2001:                                        break;
1.44      msaitoh  2002:                                case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
1.28      msaitoh  2003:                                        M_HASHTYPE_SET(sendmp,
                   2004:                                            M_HASHTYPE_RSS_TCP_IPV6_EX);
                   2005:                                        break;
1.6       msaitoh  2006: #if __FreeBSD_version > 1100000
1.44      msaitoh  2007:                                case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
1.28      msaitoh  2008:                                        M_HASHTYPE_SET(sendmp,
                   2009:                                            M_HASHTYPE_RSS_UDP_IPV4);
                   2010:                                        break;
1.44      msaitoh  2011:                                case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
1.28      msaitoh  2012:                                        M_HASHTYPE_SET(sendmp,
                   2013:                                            M_HASHTYPE_RSS_UDP_IPV6);
                   2014:                                        break;
1.44      msaitoh  2015:                                case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
1.28      msaitoh  2016:                                        M_HASHTYPE_SET(sendmp,
                   2017:                                            M_HASHTYPE_RSS_UDP_IPV6_EX);
                   2018:                                        break;
                   2019: #endif
1.44      msaitoh  2020:                                default:
1.28      msaitoh  2021:                                        M_HASHTYPE_SET(sendmp,
                   2022:                                            M_HASHTYPE_OPAQUE_HASH);
                   2023:                                }
                   2024:                        } else {
                   2025:                                sendmp->m_pkthdr.flowid = que->msix;
1.1       msaitoh  2026:                                M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
                   2027:                        }
1.8       msaitoh  2028: #endif
1.1       msaitoh  2029:                }
                   2030: next_desc:
                   2031:                ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
                   2032:                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                   2033:
                   2034:                /* Advance our pointers to the next descriptor. */
                   2035:                if (++i == rxr->num_desc)
                   2036:                        i = 0;
                   2037:
                   2038:                /* Now send to the stack or do LRO */
                   2039:                if (sendmp != NULL) {
                   2040:                        rxr->next_to_check = i;
1.28      msaitoh  2041:                        IXGBE_RX_UNLOCK(rxr);
1.1       msaitoh  2042:                        ixgbe_rx_input(rxr, ifp, sendmp, ptype);
1.28      msaitoh  2043:                        IXGBE_RX_LOCK(rxr);
1.1       msaitoh  2044:                        i = rxr->next_to_check;
                   2045:                }
                   2046:
1.28      msaitoh  2047:                /* Every 8 descriptors we refresh the mbufs */
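                                       /*
                                        * Refreshing in batches keeps the number of
                                        * ixgbe_refresh_mbufs() calls small while
                                        * still returning buffers to the hardware
                                        * reasonably promptly.
                                        */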
1.1       msaitoh  2048:                if (processed == 8) {
                   2049:                        ixgbe_refresh_mbufs(rxr, i);
                   2050:                        processed = 0;
                   2051:                }
                   2052:        }
                   2053:
                   2054:        /* Refresh any remaining buf structs */
                   2055:        if (ixgbe_rx_unrefreshed(rxr))
                   2056:                ixgbe_refresh_mbufs(rxr, i);
                   2057:
                   2058:        rxr->next_to_check = i;
                   2059:
1.28      msaitoh  2060:        IXGBE_RX_UNLOCK(rxr);
                   2061:
1.1       msaitoh  2062: #ifdef LRO
                   2063:        /*
                   2064:         * Flush any outstanding LRO work
                   2065:         */
1.10      msaitoh  2066:        tcp_lro_flush_all(lro);
1.1       msaitoh  2067: #endif /* LRO */
                   2068:
                   2069:        /*
1.28      msaitoh  2070:         * Still have cleaning to do?
                   2071:         */
1.1       msaitoh  2072:        if ((staterr & IXGBE_RXD_STAT_DD) != 0)
1.28      msaitoh  2073:                return (TRUE);
                   2074:
                   2075:        return (FALSE);
                   2076: } /* ixgbe_rxeof */
1.1       msaitoh  2077:
                   2078:
1.28      msaitoh  2079: /************************************************************************
                   2080:  * ixgbe_rx_checksum
1.1       msaitoh  2081:  *
1.28      msaitoh  2082:  *   Verify that the hardware indicated that the checksum is valid.
                   2083:  *   Inform the stack about the status of the checksum so that the
                   2084:  *   stack doesn't spend time verifying it again.
                   2085:  ************************************************************************/
1.1       msaitoh  2086: static void
                   2087: ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
                   2088:     struct ixgbe_hw_stats *stats)
                   2089: {
1.28      msaitoh  2090:        u16  status = (u16)staterr;
                   2091:        u8   errors = (u8)(staterr >> 24);
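                               /*
                                * The low 16 bits of the descriptor's status_error
                                * word carry the status bits and the top byte the
                                * error bits, hence the casts above.
                                */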
1.1       msaitoh  2092: #if 0
1.28      msaitoh  2093:        bool sctp = false;
1.1       msaitoh  2094:
                   2095:        if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
                   2096:            (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
1.8       msaitoh  2097:                sctp = true;
1.1       msaitoh  2098: #endif
                   2099:
1.8       msaitoh  2100:        /* IPv4 checksum */
1.1       msaitoh  2101:        if (status & IXGBE_RXD_STAT_IPCS) {
                   2102:                stats->ipcs.ev_count++;
                   2103:                if (!(errors & IXGBE_RXD_ERR_IPE)) {
                   2104:                        /* IP Checksum Good */
                   2105:                        mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
                   2106:                } else {
                   2107:                        stats->ipcs_bad.ev_count++;
                   2108:                        mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
                   2109:                }
                   2110:        }
1.8       msaitoh  2111:        /* TCP/UDP/SCTP checksum */
1.1       msaitoh  2112:        if (status & IXGBE_RXD_STAT_L4CS) {
                   2113:                stats->l4cs.ev_count++;
                   2114:                int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
                   2115:                if (!(errors & IXGBE_RXD_ERR_TCPE)) {
                   2116:                        mp->m_pkthdr.csum_flags |= type;
                   2117:                } else {
                   2118:                        stats->l4cs_bad.ev_count++;
                   2119:                        mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
                   2120:                }
                   2121:        }
1.28      msaitoh  2122: } /* ixgbe_rx_checksum */
1.1       msaitoh  2123:
1.28      msaitoh  2124: /************************************************************************
                   2125:  * ixgbe_dma_malloc
                   2126:  ************************************************************************/
1.1       msaitoh  2127: int
                   2128: ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
                   2129:                struct ixgbe_dma_alloc *dma, const int mapflags)
                   2130: {
                   2131:        device_t dev = adapter->dev;
1.28      msaitoh  2132:        int      r, rsegs;
1.1       msaitoh  2133:
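                               /*
                                * Allocation proceeds in steps: create a DMA tag,
                                * allocate DMA-safe memory, map it into kernel
                                * virtual address space, create a DMA map and load
                                * it to obtain the bus address.  The fail_* labels
                                * below unwind whatever steps completed.
                                */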
1.28      msaitoh  2134:        r = ixgbe_dma_tag_create(
                   2135:             /*      parent */ adapter->osdep.dmat,
                   2136:             /*   alignment */ DBA_ALIGN,
                   2137:             /*      bounds */ 0,
                   2138:             /*     maxsize */ size,
                   2139:             /*   nsegments */ 1,
                   2140:             /*  maxsegsize */ size,
                   2141:             /*       flags */ BUS_DMA_ALLOCNOW,
1.1       msaitoh  2142:                               &dma->dma_tag);
                   2143:        if (r != 0) {
                   2144:                aprint_error_dev(dev,
1.44      msaitoh  2145:                    "%s: ixgbe_dma_tag_create failed; error %d\n", __func__,
                   2146:                    r);
1.1       msaitoh  2147:                goto fail_0;
                   2148:        }
                   2149:
1.28      msaitoh  2150:        r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
                   2151:            dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
                   2152:            &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
1.1       msaitoh  2153:        if (r != 0) {
                   2154:                aprint_error_dev(dev,
                   2155:                    "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
                   2156:                goto fail_1;
                   2157:        }
                   2158:
                   2159:        r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
                   2160:            size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
                   2161:        if (r != 0) {
                   2162:                aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
                   2163:                    __func__, r);
                   2164:                goto fail_2;
                   2165:        }
                   2166:
                   2167:        r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
                   2168:        if (r != 0) {
                   2169:                aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
                   2170:                    __func__, r);
                   2171:                goto fail_3;
                   2172:        }
                   2173:
1.28      msaitoh  2174:        r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
                   2175:            dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
1.1       msaitoh  2176:        if (r != 0) {
                   2177:                aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
                   2178:                    __func__, r);
                   2179:                goto fail_4;
                   2180:        }
                   2181:        dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
                   2182:        dma->dma_size = size;
                   2183:        return 0;
                   2184: fail_4:
                   2185:        ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
                   2186: fail_3:
                   2187:        bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
                   2188: fail_2:
                   2189:        bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
                   2190: fail_1:
                   2191:        ixgbe_dma_tag_destroy(dma->dma_tag);
                   2192: fail_0:
                   2193:
1.28      msaitoh  2194:        return (r);
                   2195: } /* ixgbe_dma_malloc */
                   2196:
                   2197: /************************************************************************
                   2198:  * ixgbe_dma_free
                   2199:  ************************************************************************/
1.3       msaitoh  2200: void
1.1       msaitoh  2201: ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
                   2202: {
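                               /*
                                * Sync and unload the map, free the single DMA
                                * segment, then destroy the tag.
                                */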
                   2203:        bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
                   2204:            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                   2205:        ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
                   2206:        bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
                   2207:        ixgbe_dma_tag_destroy(dma->dma_tag);
1.28      msaitoh  2208: } /* ixgbe_dma_free */
1.1       msaitoh  2209:
                   2210:
1.28      msaitoh  2211: /************************************************************************
                   2212:  * ixgbe_allocate_queues
1.1       msaitoh  2213:  *
1.28      msaitoh  2214:  *   Allocate memory for the transmit and receive rings, and then
                   2215:  *   the descriptors associated with each.  Called only once at attach.
                   2216:  ************************************************************************/
1.1       msaitoh  2217: int
                   2218: ixgbe_allocate_queues(struct adapter *adapter)
                   2219: {
                   2220:        device_t        dev = adapter->dev;
                   2221:        struct ix_queue *que;
                   2222:        struct tx_ring  *txr;
                   2223:        struct rx_ring  *rxr;
1.28      msaitoh  2224:        int             rsize, tsize, error = IXGBE_SUCCESS;
                   2225:        int             txconf = 0, rxconf = 0;
1.1       msaitoh  2226:
1.28      msaitoh  2227:        /* First, allocate the top level queue structs */
                   2228:        adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
                   2229:            adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
                   2230:        if (adapter->queues == NULL) {
                   2231:                aprint_error_dev(dev, "Unable to allocate queue memory\n");
1.1       msaitoh  2232:                error = ENOMEM;
                   2233:                goto fail;
                   2234:        }
                   2235:
1.28      msaitoh  2236:        /* Second, allocate the TX ring struct memory */
                   2237:        adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
                   2238:            adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
                   2239:        if (adapter->tx_rings == NULL) {
1.1       msaitoh  2240:                aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
                   2241:                error = ENOMEM;
                   2242:                goto tx_fail;
                   2243:        }
                   2244:
1.28      msaitoh  2245:        /* Third, allocate the RX ring */
                   2246:        adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
                   2247:            adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
                   2248:        if (adapter->rx_rings == NULL) {
1.1       msaitoh  2249:                aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
                   2250:                error = ENOMEM;
                   2251:                goto rx_fail;
                   2252:        }
                   2253:
                   2254:        /* Byte size of the TX descriptor ring, rounded up to DBA_ALIGN */
1.28      msaitoh  2255:        tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
                   2256:            DBA_ALIGN);
1.1       msaitoh  2257:
                   2258:        /*
                   2259:         * Now set up the TX queues; txconf is needed to handle the
                   2260:         * possibility that things fail midcourse and we need to
                   2261:         * undo the memory allocations gracefully.
1.28      msaitoh  2262:         */
1.1       msaitoh  2263:        for (int i = 0; i < adapter->num_queues; i++, txconf++) {
                   2264:                /* Set up some basics */
                   2265:                txr = &adapter->tx_rings[i];
                   2266:                txr->adapter = adapter;
1.28      msaitoh  2267:                txr->txr_interq = NULL;
                   2268:                /* In case SR-IOV is enabled, align the index properly */
1.5       msaitoh  2269: #ifdef PCI_IOV
1.28      msaitoh  2270:                txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
                   2271:                    i);
1.5       msaitoh  2272: #else
1.1       msaitoh  2273:                txr->me = i;
1.5       msaitoh  2274: #endif
1.1       msaitoh  2275:                txr->num_desc = adapter->num_tx_desc;
                   2276:
                   2277:                /* Initialize the TX side lock */
                   2278:                mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
                   2279:
1.28      msaitoh  2280:                if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
                   2281:                    BUS_DMA_NOWAIT)) {
1.1       msaitoh  2282:                        aprint_error_dev(dev,
                   2283:                            "Unable to allocate TX Descriptor memory\n");
                   2284:                        error = ENOMEM;
                   2285:                        goto err_tx_desc;
                   2286:                }
                   2287:                txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
                   2288:                bzero((void *)txr->tx_base, tsize);
                   2289:
1.28      msaitoh  2290:                /* Now allocate transmit buffers for the ring */
                   2291:                if (ixgbe_allocate_transmit_buffers(txr)) {
1.1       msaitoh  2292:                        aprint_error_dev(dev,
                   2293:                            "Critical Failure setting up transmit buffers\n");
                   2294:                        error = ENOMEM;
                   2295:                        goto err_tx_desc;
                   2296:                }
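                                       /*
                                        * Unless the legacy transmit path is in use,
                                        * each ring also gets a producer/consumer
                                        * queue (pcq); presumably the multiqueue
                                        * transmit path enqueues packets into it.
                                        */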
1.28      msaitoh  2297:                if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
                   2298:                        /* Allocate a buf ring */
                   2299:                        txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
                   2300:                        if (txr->txr_interq == NULL) {
                   2301:                                aprint_error_dev(dev,
                   2302:                                    "Critical Failure setting up buf ring\n");
                   2303:                                error = ENOMEM;
                   2304:                                goto err_tx_desc;
                   2305:                        }
                   2306:                }
1.1       msaitoh  2307:        }
                   2308:
                   2309:        /*
                   2310:         * Next the RX queues...
1.53      msaitoh  2311:         */
1.28      msaitoh  2312:        rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
                   2313:            DBA_ALIGN);
1.1       msaitoh  2314:        for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
                   2315:                rxr = &adapter->rx_rings[i];
                   2316:                /* Set up some basics */
                   2317:                rxr->adapter = adapter;
1.5       msaitoh  2318: #ifdef PCI_IOV
1.28      msaitoh  2319:                /* In case SR-IOV is enabled, align the index properly */
                   2320:                rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
                   2321:                    i);
1.5       msaitoh  2322: #else
1.1       msaitoh  2323:                rxr->me = i;
1.5       msaitoh  2324: #endif
1.1       msaitoh  2325:                rxr->num_desc = adapter->num_rx_desc;
                   2326:
                   2327:                /* Initialize the RX side lock */
                   2328:                mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
                   2329:
1.28      msaitoh  2330:                if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
                   2331:                    BUS_DMA_NOWAIT)) {
1.1       msaitoh  2332:                        aprint_error_dev(dev,
                   2333:                            "Unable to allocate RX Descriptor memory\n");
                   2334:                        error = ENOMEM;
                   2335:                        goto err_rx_desc;
                   2336:                }
                   2337:                rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
                   2338:                bzero((void *)rxr->rx_base, rsize);
                   2339:
1.28      msaitoh  2340:                /* Allocate receive buffers for the ring */
1.1       msaitoh  2341:                if (ixgbe_allocate_receive_buffers(rxr)) {
                   2342:                        aprint_error_dev(dev,
                   2343:                            "Critical Failure setting up receive buffers\n");
                   2344:                        error = ENOMEM;
                   2345:                        goto err_rx_desc;
                   2346:                }
                   2347:        }
                   2348:
                   2349:        /*
1.28      msaitoh  2350:         * Finally set up the queue holding structs
                   2351:         */
1.1       msaitoh  2352:        for (int i = 0; i < adapter->num_queues; i++) {
                   2353:                que = &adapter->queues[i];
                   2354:                que->adapter = adapter;
1.3       msaitoh  2355:                que->me = i;
1.1       msaitoh  2356:                que->txr = &adapter->tx_rings[i];
                   2357:                que->rxr = &adapter->rx_rings[i];
1.33      knakahar 2358:
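                                       /*
                                        * dc_mtx presumably serializes updates to
                                        * disabled_count, which appears to track
                                        * nested disabling of this queue's interrupt.
                                        */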
1.37      knakahar 2359:                mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
                   2360:                que->disabled_count = 0;
1.1       msaitoh  2361:        }
                   2362:
                   2363:        return (0);
                   2364:
                   2365: err_rx_desc:
                   2366:        for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
                   2367:                ixgbe_dma_free(adapter, &rxr->rxdma);
                   2368: err_tx_desc:
                   2369:        for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
                   2370:                ixgbe_dma_free(adapter, &txr->txdma);
                   2371:        free(adapter->rx_rings, M_DEVBUF);
                   2372: rx_fail:
                   2373:        free(adapter->tx_rings, M_DEVBUF);
                   2374: tx_fail:
                   2375:        free(adapter->queues, M_DEVBUF);
                   2376: fail:
                   2377:        return (error);
1.28      msaitoh  2378: } /* ixgbe_allocate_queues */
