
Annotation of src/sys/arch/arm/sociox/if_ave.c, Revision 1.18

1.18    ! thorpej     1: /*     $NetBSD: if_ave.c,v 1.17 2020/09/23 23:38:24 nisimura Exp $     */
1.1       nisimura    2:
                      3: /*-
                      4:  * Copyright (c) 2020 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Tohru Nishimura.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: /*
1.6       nisimura   33:  * Socionext UniPhier AVE GbE driver
                     34:  *
                      35:  * There are two flavours: one for the 64-bit paddr model and one for the 32-bit paddr model.
1.1       nisimura   36:  */
                     37:
                     38: #include <sys/cdefs.h>
1.18    ! thorpej    39: __KERNEL_RCSID(0, "$NetBSD: if_ave.c,v 1.17 2020/09/23 23:38:24 nisimura Exp $");
1.1       nisimura   40:
                     41: #include <sys/param.h>
                     42: #include <sys/bus.h>
                     43: #include <sys/intr.h>
                     44: #include <sys/device.h>
                     45: #include <sys/callout.h>
1.16      nisimura   46: #include <sys/ioctl.h>
                     47: #include <sys/malloc.h>
1.1       nisimura   48: #include <sys/mbuf.h>
                     49: #include <sys/errno.h>
                     50: #include <sys/rndsource.h>
                     51: #include <sys/kernel.h>
                     52: #include <sys/systm.h>
                     53:
                     54: #include <net/if.h>
                     55: #include <net/if_media.h>
                     56: #include <net/if_dl.h>
                     57: #include <net/if_ether.h>
                     58: #include <dev/mii/mii.h>
                     59: #include <dev/mii/miivar.h>
                     60: #include <net/bpf.h>
                     61:
                     62: #include <dev/fdt/fdtvar.h>
                     63:
1.11      nisimura   64: #define NOT_MP_SAFE    (0)
1.1       nisimura   65:
1.17      nisimura   66: /*
                      67:  * AVE has two different, rather obscure, descriptor formats. The 32-bit
                      68:  * paddr descriptor layout occupies 8 bytes while the 64-bit paddr
                      69:  * descriptor occupies 12 bytes. AVE is a derivative of the Synopsys
                      70:  * DesignWare Core EMAC.
                     71:  */
                     72: struct tdes {
                     73:        uint32_t t0, t1, t2;
                     74: };
                     75:
                     76: struct rdes {
                     77:        uint32_t r0, r1, r2;
                     78: };
                     79:
                     80: struct tdes32 { uint32_t t0, t1; };
                     81: struct rdes32 { uint32_t r0, r1; };
                     82:
                     83: #define T0_OWN         (1U<<31)        /* desc is ready to Tx */
                     84: #define T0_IOC         (1U<<29)        /* post interrupt on Tx completes */
                     85: #define T0_NOCSUM      (1U<<28)        /* inhibit checksum operation */
                     86: #define T0_DONEOK      (1U<<27)        /* status - Tx completed ok */
                     87: #define T0_FS          (1U<<26)        /* first segment of frame */
                     88: #define T0_LS          (1U<<25)        /* last segment of frame */
                     89: #define T0_OWC         (1U<<21)        /* status - out of win. late coll. */
                     90: #define T0_ECOL                (1U<<20)        /* status - excess collision */
                     91: #define T0_TBS_MASK    0xffff          /* T0 segment length 15:0 */
                     92: /* T1 segment address 31:0 */
                     93: /* T2 segment address 63:32 */
                     94: #define R0_OWN         (1U<<31)        /* desc is empty */
                     95: #define R0_CSUM                (1U<<21)        /* receive checksum done */
                     96: #define R0_CERR                (1U<<20)        /* csum found negative */
                     97: #define R0_FL_MASK     0x07ff          /* R0 frame length 10:0 */
                     98: /* R1 frame address 31:0 */
                     99: /* R2 frame address 63:32 */
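                         /*
                          * For illustration only (not the driver's literal code; the real
                          * work is done by the desops handlers declared below), filling a
                          * single-segment 64-bit Tx descriptor with the fields above would
                          * look roughly like:
                          *
                          *	tdes->t1 = (uint32_t)paddr;             (segment address 31:0)
                          *	tdes->t2 = (uint32_t)(paddr >> 32);     (segment address 63:32)
                          *	tdes->t0 = T0_OWN | T0_FS | T0_LS | T0_IOC
                          *	    | (len & T0_TBS_MASK);              (hand over to HW last)
                          */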
                    100:
1.1       nisimura  101: #define AVEID          0x000           /* hardware ID */
                    102: #define AVEHWVER       0x004           /* hardware version */
                    103: #define AVEGR          0x008           /* chip global control */
                    104: #define  GR_RXRST      (1U<<5)         /* RxFIFO reset */
                    105: #define  GR_PHYRST     (1U<<4)         /* external PHY reset */
                    106: #define  GR_GRST       (1U<<0)         /* full chip reset */
                    107: #define AVECFG         0x00c           /* hw configuration */
                    108: #define  CFG_FLE       (1U<<31)        /* filter function enable */
                    109: #define  CFG_CKE       (1U<<30)        /* checksum enable */
1.11      nisimura  110: #define  CFG_MII       (1U<<27)        /* 1: RMII/MII, 0: RGMII */
1.1       nisimura  111: #define  CFG_IPFCKE    (1U<<24)        /* IP fragment csum enable */
                    112: #define AVEGIMR                0x100           /* global interrupt mask */
                    113: #define AVEGISR                0x104           /* global interrupt status */
                    114: #define  GISR_PHY      (1U<<24)        /* PHY status change detected */
                    115: #define  GISR_TXCI     (1U<<16)        /* transmission completed */
1.8       nisimura  116: #define  GISR_RXF2L    (1U<<8)         /* Rx frame length beyond limit */
                    117: #define  GISR_RXOVF    (1U<<7)         /* RxFIFO overflow detected */
                    118: #define  GISR_RXDROP   (1U<<6)         /* PAUSE frame has been dropped */
1.1       nisimura  119: #define  GISR_RXIT     (1U<<5)         /* receive itimer notify */
                    120: #define AVETXC         0x200           /* transmit control */
1.10      nisimura  121: #define  TXC_FCE       (1U<<18)        /* generate PAUSE to moderate Rx lvl */
1.1       nisimura  122: #define  TXC_SPD1000   (1U<<17)        /* use 1000Mbps */
                    123: #define  TXC_SPD100    (1U<<16)        /* use 100Mbps */
                    124: #define AVERXC         0x204           /* receive control */
                    125: #define  RXC_EN                (1U<<30)        /* enable receive circuit */
                    126: #define  RXC_USEFDX    (1U<<22)        /* use full-duplex */
1.10      nisimura  127: #define  RXC_FCE       (1U<<21)        /* accept PAUSE to throttle Tx */
1.1       nisimura  128: #define  RXC_AFE       (1U<<19)        /* use address filter (!promisc) */
                    129: #define  RXC_DRPEN     (1U<<18)        /* drop receiving PAUSE frames */
                    130: /* RXC 15:0 max frame length to accept */
                    131: #define AVEMACL                0x208           /* MAC address lower */
                    132: #define AVEMACH                0x20c           /* MAC address upper */
                    133: #define AVEMDIOC       0x214           /* MDIO control */
                    134: #define  MDIOC_RD      (1U<<3)         /* read op */
                    135: #define  MDIOC_WR      (1U<<2)         /* write op */
                    136: #define AVEMDADR       0x218           /* MDIO address -- 13:8 phy id */
                    137: #define AVEMDWRD       0x21c           /* MDIO write data - 15:0 */
                    138: #define AVEMDIOS       0x220           /* MDIO status */
                    139: #define  MDIOS_BUSY    (1U<<0)         /* MDIO in progress */
                    140: #define AVEMDRDD       0x224           /* MDIO read data */
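                         /*
                          * MDIO access goes through AVEMDADR/AVEMDIOC/AVEMDIOS/AVEMDRDD.
                          * A PHY register read, roughly as mii_readreg() below does it:
                          *
                          *	CSR_WRITE(sc, AVEMDADR, reg | (phy_id << 8));
                          *	CSR_WRITE(sc, AVEMDIOC,
                          *	    (CSR_READ(sc, AVEMDIOC) & ~MDIOC_WR) | MDIOC_RD);
                          *	while (CSR_READ(sc, AVEMDIOS) & MDIOS_BUSY)
                          *		DELAY(10);
                          *	val = CSR_READ(sc, AVEMDRDD);
                          */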
                    141: #define AVEDESCC       0x300           /* descriptor control */
                    142: #define  DESCC_RD0     (1U<<3)         /* activate Rx0 descriptor to run */
                    143: #define  DESCC_RSTP    (1U<<2)         /* pause Rx descriptor */
                    144: #define  DESCC_TD      (1U<<0)         /* activate Tx descriptor to run */
1.17      nisimura  145: /* 31:16 status report to read */
1.1       nisimura  146: #define AVETXDES       0x304           /* Tx descriptor control */
1.17      nisimura  147: /* 27:16 Tx descriptor byte count, 11:0 start address offset */
1.1       nisimura  148: #define AVERXDES0      0x308           /* Rx0 descriptor control */
1.17      nisimura  149: /* 30:16 Rx descriptor byte count, 14:0 start address offset */
1.1       nisimura  150: #define AVEITIRQC      0x34c           /* interval IRQ control */
                    151: #define  ITIRQC_R0E    (1U<<27)        /* enable Rx0 interval timer */
1.17      nisimura  152: #define  INTMVAL       (20<<16)        /* 31:16 interval timer count */
1.1       nisimura  153:
                    154: #define AVEAFB         0x0800          /* address filter base */
                    155: #define AVEAFMSKB      0x0d00          /* byte mask base */
                    156: #define  MSKBYTE0      0xffffff3f      /* zeros in 7:6 */
                    157: #define  MSKBYTE1      0x03ffffff      /* ones in 25:0 */
1.7       nisimura  158: #define  genmask0(x)   (MSKBYTE0 & (~0U << (x)))
1.1       nisimura  159: #define AVEAFMSKV      0x0e00          /* bit mask base */
                    160: #define AVEAFRING      0x0f00          /* entry ring number selector */
                    161: #define AVEAFEN                0x0ffc          /* entry enable bit vector */
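                         /*
                          * Each filter entry i is programmed with the target MAC bytes at
                          * AVEAFB + i*0x40, a byte mask at AVEAFMSKB + i*8, a bit vector
                          * mask at AVEAFMSKV + i*4, a destination ring selector at
                          * AVEAFRING + i*4, and its enable bit in AVEAFEN; see
                          * ave_write_filt() below for the exact sequence.
                          */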
                    162:
1.17      nisimura  163: /* AVE has internal cache coherent memory to hold descriptor arrays. */
                    164: #define AVETDB         0x1000          /* 64-bit Tx desc store, up to 256 */
                    165: #define AVERDB         0x1c00          /* 64-bit Rx desc store, up to 2048 */
                    166: #define AVE32TDB       0x1000          /* 32-bit Tx store base, up to 256 */
                    167: #define AVE32RDB       0x1800          /* 32-bit Rx store base, up to 2048 */
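                         /*
                          * Because these stores live in the device's own coherent memory
                          * window, ave_init() below points sc_txdescs/sc_rxdescs (and the
                          * 32-bit variants) directly at sc_sh + AVETDB/AVERDB instead of
                          * allocating DMA-able control data.
                          */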
1.1       nisimura  168:
1.7       nisimura  169: #define AVERMIIC       0x8028          /* RMII control */
1.8       nisimura  170: #define  RMIIC_RST     (1U<<16)        /* reset operation */
                    171: #define AVELINKSEL     0x8034          /* RMII speed selection */
                    172: #define  LINKSEL_SPD100        (1U<<0)         /* use 100Mbps */
1.7       nisimura  173:
1.17      nisimura  174: #define CSR_READ(sc, off) \
                    175:            bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
                    176: #define CSR_WRITE(sc, off, val) \
                    177:            bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
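                         /*
                          * A typical read-modify-write with these accessors, e.g. pausing
                          * the receiver as mii_statchg() and ave_set_rcvfilt() below do:
                          *
                          *	uint32_t csr = CSR_READ(sc, AVERXC);
                          *	CSR_WRITE(sc, AVERXC, csr & ~RXC_EN);
                          */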
1.1       nisimura  178:
1.13      nisimura  179: #define MD_NTXSEGS             16              /* fixed */
                    180: #define MD_TXQUEUELEN          (MD_NTXDESC / MD_NTXSEGS)
                    181: #define MD_TXQUEUELEN_MASK     (MD_TXQUEUELEN - 1)
                    182: #define MD_TXQUEUE_GC          (MD_TXQUEUELEN / 4)
                    183: #define MD_NTXDESC             256             /* this is max HW limit */
                    184: #define MD_NTXDESC_MASK        (MD_NTXDESC - 1)
                    185: #define MD_NEXTTX(x)           (((x) + 1) & MD_NTXDESC_MASK)
                    186: #define MD_NEXTTXS(x)          (((x) + 1) & MD_TXQUEUELEN_MASK)
                    187:
                    188: #define MD_NRXDESC             256             /* tunable */
                    189: #define MD_NRXDESC_MASK        (MD_NRXDESC - 1)
                    190: #define MD_NEXTRX(x)           (((x) + 1) & MD_NRXDESC_MASK)
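                         /*
                          * Ring indices wrap with a power-of-two mask; with MD_NTXDESC
                          * = 256, for example, MD_NEXTTX(10) == 11 and MD_NEXTTX(255) == 0.
                          */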
1.1       nisimura  191:
                    192: struct ave_txsoft {
                    193:        struct mbuf *txs_mbuf;          /* head of our mbuf chain */
                    194:        bus_dmamap_t txs_dmamap;        /* our DMA map */
                    195:        int txs_firstdesc;              /* first descriptor in packet */
                    196:        int txs_lastdesc;               /* last descriptor in packet */
                    197:        int txs_ndesc;                  /* # of descriptors used */
                    198: };
                    199:
                    200: struct ave_rxsoft {
                    201:        struct mbuf *rxs_mbuf;          /* head of our mbuf chain */
                    202:        bus_dmamap_t rxs_dmamap;        /* our DMA map */
                    203: };
                    204:
1.14      nisimura  205: struct desops;
                    206:
1.1       nisimura  207: struct ave_softc {
                    208:        device_t sc_dev;                /* generic device information */
                    209:        bus_space_tag_t sc_st;          /* bus space tag */
                    210:        bus_space_handle_t sc_sh;       /* bus space handle */
                    211:        bus_size_t sc_mapsize;          /* csr map size */
                    212:        bus_dma_tag_t sc_dmat;          /* bus DMA tag */
                    213:        struct ethercom sc_ethercom;    /* Ethernet common data */
                    214:        struct mii_data sc_mii;         /* MII */
                    215:        callout_t sc_tick_ch;           /* PHY monitor callout */
                    216:        int sc_flowflags;               /* 802.3x PAUSE flow control */
                    217:        void *sc_ih;                    /* interrupt cookie */
                    218:        int sc_phy_id;                  /* PHY address */
1.12      nisimura  219:        uint32_t sc_100mii;             /* 1<<27: RMII/MII, 0: RGMII */
1.1       nisimura  220:        uint32_t sc_rxc;                /* software copy of AVERXC */
1.4       nisimura  221:        int sc_model;                   /* 64-bit paddr model, otherwise 32 */
1.1       nisimura  222:
                    223:        bus_dmamap_t sc_cddmamap;       /* control data DMA map */
                    224: #define sc_cddma       sc_cddmamap->dm_segs[0].ds_addr
                    225:
1.6       nisimura  226:        struct tdes *sc_txdescs;        /* PTR to tdes [NTXDESC] store */
                    227:        struct rdes *sc_rxdescs;        /* PTR to rdes [NRXDESC] store */
1.5       nisimura  228:        struct tdes32 *sc_txd32;
                    229:        struct rdes32 *sc_rxd32;
1.14      nisimura  230:        struct desops *sc_desops;       /* descriptor management */
1.1       nisimura  231:
1.13      nisimura  232:        struct ave_txsoft sc_txsoft[MD_TXQUEUELEN];
                    233:        struct ave_rxsoft sc_rxsoft[MD_NRXDESC];
1.1       nisimura  234:        int sc_txfree;                  /* number of free Tx descriptors */
                    235:        int sc_txnext;                  /* next ready Tx descriptor */
                    236:        int sc_txsfree;                 /* number of free Tx jobs */
                    237:        int sc_txsnext;                 /* next ready Tx job */
                    238:        int sc_txsdirty;                /* dirty Tx jobs */
                    239:        int sc_rxptr;                   /* next ready Rx descriptor/descsoft */
                    240:        uint32_t sc_t0csum;             /* t0 field checksum designation */
                    241:
                    242:        krndsource_t rnd_source;        /* random source */
                    243: };
                    244:
                    245: static int ave_fdt_match(device_t, cfdata_t, void *);
                    246: static void ave_fdt_attach(device_t, device_t, void *);
                    247:
                    248: CFATTACH_DECL_NEW(ave_fdt, sizeof(struct ave_softc),
                    249:     ave_fdt_match, ave_fdt_attach, NULL, NULL);
                    250:
                    251: static void ave_reset(struct ave_softc *);
                    252: static int ave_init(struct ifnet *);
                    253: static void ave_start(struct ifnet *);
                    254: static void ave_stop(struct ifnet *, int);
                    255: static void ave_watchdog(struct ifnet *);
                    256: static int ave_ioctl(struct ifnet *, u_long, void *);
                    257: static void ave_set_rcvfilt(struct ave_softc *);
                    258: static void ave_write_filt(struct ave_softc *, int, const uint8_t *);
                    259: static void ave_ifmedia_sts(struct ifnet *, struct ifmediareq *);
                    260: static void mii_statchg(struct ifnet *);
                    261: static void lnkchg(struct ave_softc *);
                    262: static void phy_tick(void *);
                    263: static int mii_readreg(device_t, int, int, uint16_t *);
                    264: static int mii_writereg(device_t, int, int, uint16_t);
                    265: static int ave_intr(void *);
                    266: static void txreap(struct ave_softc *);
                    267: static void rxintr(struct ave_softc *);
                    268: static int add_rxbuf(struct ave_softc *, int);
                    269:
1.14      nisimura  270: struct desops {
                    271:        void (*make_tdes)(void *, int, int, int);
                    272:        void (*mark_txfs)(void *, int);
                    273:        void (*mark_txls)(void *, int);
                    274:        void (*mark_txic)(void *, int);
                    275:        int  (*read_tdes0)(void *, int);
                    276:        int  (*read_rdes0)(void *, int);
                    277:        int  (*read_rlen)(void *, int);
                    278:        void (*init_rdes)(void *, int);
                    279: };
                    280: #define MAKE_TDES(sc,x,s,o) (*(sc)->sc_desops->make_tdes)((sc),(x),(s),(o))
                    281: #define MARK_TXFS(sc,x) (*(sc)->sc_desops->mark_txfs)((sc),(x))
                    282: #define MARK_TXLS(sc,x) (*(sc)->sc_desops->mark_txls)((sc),(x))
                    283: #define MARK_TXIC(sc,x) (*(sc)->sc_desops->mark_txic)((sc),(x))
                    284: #define READ_TDES0(sc,x) (*(sc)->sc_desops->read_tdes0)((sc),(x))
                    285: #define READ_RDES0(sc,x) (*(sc)->sc_desops->read_rdes0)((sc),(x))
                    286: #define INIT_RDES(sc,x) (*(sc)->sc_desops->init_rdes)((sc),(x))
                    287: /* received frame length is stored in RDES0 10:0 */
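                         /*
                          * The desops vector hides the 64-bit vs. 32-bit descriptor layout
                          * from the main code paths; sc_desops is chosen in ave_fdt_attach()
                          * from sc_model, so a call such as MARK_TXFS(sc, x) lands in either
                          * mark_txfs() (ave64ops) or omark_txfs() (ave32ops).
                          */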
                    288:
                    289: static void make_tdes(void *, int, int, int);
                    290: static void mark_txfs(void *, int);
                    291: static void mark_txls(void *, int);
                    292: static void mark_txic(void *, int);
                    293: static int read_tdes0(void *, int);
                    294: static int read_rdes0(void *, int);
                    295: static void init_rdes(void *, int);
                    296: struct desops ave64ops = {
                    297:        make_tdes,
                    298:        mark_txfs,
                    299:        mark_txls,
                    300:        mark_txic,
                    301:        read_tdes0,
                    302:        read_rdes0,
                    303:        NULL,
                    304:        init_rdes,
                    305: };
                    306: static void omake_tdes(void *, int, int, int);
                    307: static void omark_txfs(void *, int);
                    308: static void omark_txls(void *, int);
                    309: static void omark_txic(void *, int);
                    310: static int oread_tdes0(void *, int);
                    311: static int oread_rdes0(void *, int);
                    312: static void oinit_rdes(void *, int);
                    313: struct desops ave32ops = {
                    314:        omake_tdes,
                    315:        omark_txfs,
                    316:        omark_txls,
                    317:        omark_txic,
                    318:        oread_tdes0,
                    319:        oread_rdes0,
                    320:        NULL,
                    321:        oinit_rdes,
                    322: };
                    323:
1.18    ! thorpej   324: static const struct device_compatible_entry compat_data[] = {
        !           325:        { .compat = "socionext,uniphier-ld20-ave4", .value = 64 },
        !           326:        { .compat = "socionext,uniphier-pro4-ave4", .value = 32 },
        !           327:        { .compat = "socionext,uniphier-pxs2-ave4", .value = 32 },
        !           328:        { .compat = "socionext,uniphier-ld11-ave4", .value = 32 },
        !           329:        { .compat = "socionext,uniphier-pxs3-ave4", .value = 32 },
        !           330:
        !           331:        { 0 }
1.4       nisimura  332: };
                    333:
1.1       nisimura  334: static int
                    335: ave_fdt_match(device_t parent, cfdata_t cf, void *aux)
                    336: {
                    337:        struct fdt_attach_args * const faa = aux;
                    338:
1.4       nisimura  339:        return of_match_compat_data(faa->faa_phandle, compat_data);
1.1       nisimura  340: }
                    341:
                    342: static void
                    343: ave_fdt_attach(device_t parent, device_t self, void *aux)
                    344: {
                    345:        struct ave_softc * const sc = device_private(self);
                    346:        struct fdt_attach_args * const faa = aux;
                    347:        const int phandle = faa->faa_phandle;
                    348:        struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
                    349:        struct mii_data * const mii = &sc->sc_mii;
                    350:        struct ifmedia * const ifm = &mii->mii_media;
                    351:        bus_space_tag_t bst = faa->faa_bst;
                    352:        bus_space_handle_t bsh;
                    353:        bus_addr_t addr;
                    354:        bus_size_t size;
                    355:        char intrstr[128];
                    356:        const char *phy_mode;
                    357:        uint32_t hwimp, hwver, csr;
                    358:        uint8_t enaddr[ETHER_ADDR_LEN];
                    359:        int i, error = 0;
                    360:
1.3       nisimura  361:        if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0
                    362:            || bus_space_map(faa->faa_bst, addr, size, 0, &bsh) != 0) {
                    363:                aprint_error(": unable to map device\n");
1.1       nisimura  364:                return;
                    365:        }
                    366:        if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
                    367:                aprint_error(": failed to decode interrupt\n");
                    368:                return;
                    369:        }
1.11      nisimura  370:        sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_NET, NOT_MP_SAFE,
1.3       nisimura  371:            ave_intr, sc);
                    372:        if (sc->sc_ih == NULL) {
                    373:                aprint_error_dev(self, "couldn't establish interrupt on %s\n",
                    374:                    intrstr);
                    375:                goto fail;
                    376:        }
1.1       nisimura  377:
                    378:        sc->sc_dev = self;
                    379:        sc->sc_st = bst;
                    380:        sc->sc_sh = bsh;
                    381:        sc->sc_mapsize = size;
                    382:        sc->sc_dmat = faa->faa_dmat;
                    383:
1.3       nisimura  384:        hwimp = CSR_READ(sc, AVEID);
                    385:        hwver = CSR_READ(sc, AVEHWVER);
1.18    ! thorpej   386:        sc->sc_model = of_search_compatible(phandle, compat_data)->value;
1.3       nisimura  387:
1.6       nisimura  388:        phy_mode = fdtbus_get_string(phandle, "phy-mode");
1.16      nisimura  389:        if (phy_mode == NULL)
1.6       nisimura  390:                aprint_error(": missing 'phy-mode' property\n");
                    391:
1.1       nisimura  392:        aprint_naive("\n");
                    393:        aprint_normal(": Gigabit Ethernet Controller\n");
1.16      nisimura  394:        aprint_normal_dev(self, "UniPhier %c%c%c%c AVE%d GbE (%d.%d)\n",
1.1       nisimura  395:            hwimp >> 24, hwimp >> 16, hwimp >> 8, hwimp,
1.16      nisimura  396:            sc->sc_model, hwver >> 8, hwver & 0xff);
1.1       nisimura  397:        aprint_normal_dev(self, "interrupt on %s\n", intrstr);
                    398:
1.16      nisimura  399:        sc->sc_100mii = (phy_mode && strcmp(phy_mode, "rgmii") != 0) ? CFG_MII : 0;
1.14      nisimura  400:        sc->sc_desops = (sc->sc_model == 64) ? &ave64ops : &ave32ops;
1.1       nisimura  401:
                    402:        CSR_WRITE(sc, AVEGR, GR_GRST | GR_PHYRST);
                    403:        DELAY(20);
                    404:        CSR_WRITE(sc, AVEGR, GR_GRST);
                    405:        DELAY(40);
                    406:        CSR_WRITE(sc, AVEGR, 0);
                    407:        DELAY(40);
                    408:        CSR_WRITE(sc, AVEGIMR, 0);
                    409:
1.17      nisimura  410:        /* Ethernet MAC address is auto-loaded from EEPROM. */
1.1       nisimura  411:        csr = CSR_READ(sc, AVEMACL);
                    412:        enaddr[0] = csr;
                    413:        enaddr[1] = csr >> 8;
                    414:        enaddr[2] = csr >> 16;
                    415:        enaddr[3] = csr >> 24;
                    416:        csr = CSR_READ(sc, AVEMACH);
                    417:        enaddr[4] = csr;
                    418:        enaddr[5] = csr >> 8;
                    419:        aprint_normal_dev(self,
                    420:            "Ethernet address %s\n", ether_sprintf(enaddr));
                    421:
1.9       nisimura  422:        sc->sc_flowflags = 0;
                    423:        sc->sc_rxc = 0;
                    424:
1.1       nisimura  425:        mii->mii_ifp = ifp;
                    426:        mii->mii_readreg = mii_readreg;
                    427:        mii->mii_writereg = mii_writereg;
                    428:        mii->mii_statchg = mii_statchg;
                    429:        sc->sc_phy_id = MII_PHY_ANY;
                    430:
                    431:        sc->sc_ethercom.ec_mii = mii;
1.16      nisimura  432:        ifmedia_init(ifm, 0, ether_mediachange, ave_ifmedia_sts);
1.1       nisimura  433:        mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id,
                    434:            MII_OFFSET_ANY, MIIF_DOPAUSE);
                    435:        if (LIST_FIRST(&mii->mii_phys) == NULL) {
                    436:                ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
                    437:                ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
                    438:        } else
                    439:                ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
                    440:        ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if the user had requested it */
                    441:
                    442:        strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
                    443:        ifp->if_softc = sc;
                    444:        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
                    445:        ifp->if_ioctl = ave_ioctl;
                    446:        ifp->if_start = ave_start;
                    447:        ifp->if_watchdog = ave_watchdog;
                    448:        ifp->if_init = ave_init;
                    449:        ifp->if_stop = ave_stop;
                    450:        IFQ_SET_READY(&ifp->if_snd);
                    451:
                    452:        sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
                    453:        ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
                    454:
                    455:        if_attach(ifp);
                    456:        if_deferred_start_init(ifp, NULL);
                    457:        ether_ifattach(ifp, enaddr);
                    458:
                    459:        callout_init(&sc->sc_tick_ch, 0);
                    460:        callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);
                    461:
                    462:        /*
1.6       nisimura  463:         * The HW has a dedicated store to hold the Tx/Rx descriptor arrays,
                    464:         * so there is no need to build Tx/Rx descriptor control data;
                    465:         * go straight to creating DMA maps to hold Tx segments and Rx frames.
1.1       nisimura  466:         */
1.13      nisimura  467:        for (i = 0; i < MD_TXQUEUELEN; i++) {
1.1       nisimura  468:                if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1.13      nisimura  469:                    MD_NTXSEGS, MCLBYTES, 0, 0,
1.1       nisimura  470:                    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1.6       nisimura  471:                        aprint_error_dev(self,
1.1       nisimura  472:                            "unable to create tx DMA map %d, error = %d\n",
                    473:                            i, error);
                    474:                        goto fail_4;
                    475:                }
                    476:        }
1.13      nisimura  477:        for (i = 0; i < MD_NRXDESC; i++) {
1.1       nisimura  478:                if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
                    479:                    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1.6       nisimura  480:                        aprint_error_dev(self,
1.1       nisimura  481:                            "unable to create rx DMA map %d, error = %d\n",
                    482:                            i, error);
                    483:                        goto fail_5;
                    484:                }
                    485:                sc->sc_rxsoft[i].rxs_mbuf = NULL;
                    486:        }
                    487:
                    488:        if (pmf_device_register(sc->sc_dev, NULL, NULL))
1.6       nisimura  489:                pmf_class_network_register(self, ifp);
1.1       nisimura  490:        else
1.6       nisimura  491:                aprint_error_dev(self,
1.1       nisimura  492:                        "couldn't establish power handler\n");
                    493:
1.6       nisimura  494:        rnd_attach_source(&sc->rnd_source, device_xname(self),
1.1       nisimura  495:            RND_TYPE_NET, RND_FLAG_DEFAULT);
                    496:
                    497:        return;
                    498:
                    499:   fail_5:
1.13      nisimura  500:        for (i = 0; i < MD_NRXDESC; i++) {
1.1       nisimura  501:                if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
                    502:                        bus_dmamap_destroy(sc->sc_dmat,
                    503:                            sc->sc_rxsoft[i].rxs_dmamap);
                    504:        }
                    505:   fail_4:
1.13      nisimura  506:        for (i = 0; i < MD_TXQUEUELEN; i++) {
1.1       nisimura  507:                if (sc->sc_txsoft[i].txs_dmamap != NULL)
                    508:                        bus_dmamap_destroy(sc->sc_dmat,
                    509:                            sc->sc_txsoft[i].txs_dmamap);
                    510:        }
                    511:   /* no fail_3|2|1 */
                    512:   fail:
                    513:        fdtbus_intr_disestablish(phandle, sc->sc_ih);
                    514:        bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_mapsize);
                    515:        return;
                    516: }
                    517:
                    518: static void
                    519: ave_reset(struct ave_softc *sc)
                    520: {
1.8       nisimura  521:        uint32_t csr;
1.1       nisimura  522:
                    523:        CSR_WRITE(sc, AVERXC, 0);       /* stop Rx first */
                    524:        CSR_WRITE(sc, AVEDESCC, 0);     /* stop Tx/Rx descriptor engine */
1.15      nisimura  525:        if (sc->sc_100mii) {
1.8       nisimura  526:                csr = CSR_READ(sc, AVERMIIC);
                    527:                CSR_WRITE(sc, AVERMIIC, csr &~ RMIIC_RST);
                    528:                DELAY(10);
                    529:                CSR_WRITE(sc, AVERMIIC, csr);
                    530:        }
1.1       nisimura  531:        CSR_WRITE(sc, AVEGR, GR_RXRST); /* assert RxFIFO reset operation */
                    532:        DELAY(50);
1.8       nisimura  533:        CSR_WRITE(sc, AVEGR, 0);
1.6       nisimura  534:        CSR_WRITE(sc, AVEGISR, GISR_RXOVF); /* clear OVF condition */
1.1       nisimura  535: }
                    536:
                    537: static int
                    538: ave_init(struct ifnet *ifp)
                    539: {
                    540:        struct ave_softc *sc = ifp->if_softc;
                    541:        extern const uint8_t etherbroadcastaddr[];
                    542:        const uint8_t promisc[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
                    543:        uint32_t csr;
                    544:        int i;
                    545:
                    546:        CSR_WRITE(sc, AVEGIMR, 0);
                    547:
1.11      nisimura  548:        /* Cancel pending I/O. */
1.1       nisimura  549:        ave_stop(ifp, 0);
                    550:
                    551:        /* make sure the Rx circuit is in a sane and stable state */
                    552:        ave_reset(sc);
                    553:
1.12      nisimura  554:        CSR_WRITE(sc, AVECFG, CFG_FLE | sc->sc_100mii);
1.1       nisimura  555:
                    556:        /* set Tx/Rx descriptor ring base addr offset and total size */
1.13      nisimura  557:        CSR_WRITE(sc, AVETXDES,  0U|(sizeof(struct tdes)*MD_NTXDESC) << 16);
                    558:        CSR_WRITE(sc, AVERXDES0, 0U|(sizeof(struct rdes)*MD_NRXDESC) << 16);
1.1       nisimura  559:
                    560:        /* set ptr to Tx/Rx descriptor store */
                    561:        sc->sc_txdescs = (void *)((uintptr_t)sc->sc_sh + AVETDB);
                    562:        sc->sc_rxdescs = (void *)((uintptr_t)sc->sc_sh + AVERDB);
1.5       nisimura  563:        sc->sc_txd32 =   (void *)((uintptr_t)sc->sc_sh + AVE32TDB);
                    564:        sc->sc_rxd32 =   (void *)((uintptr_t)sc->sc_sh + AVE32RDB);
1.1       nisimura  565:
1.11      nisimura  566:        /* build sane Tx descriptors and load Rx descriptors with mbufs */
1.13      nisimura  567:        for (i = 0; i < MD_NTXDESC; i++) {
1.9       nisimura  568:                struct tdes *tdes = &sc->sc_txdescs[i];
                    569:                tdes->t2 = tdes->t1 = 0;
                    570:                tdes->t0 = T0_OWN;
                    571:        }
1.13      nisimura  572:        for (i = 0; i < MD_NRXDESC; i++)
1.1       nisimura  573:                (void)add_rxbuf(sc, i);
                    574:
                    575:        /*
                    576:         * address filter usage
                    577:         * 0 - promisc.
                    578:         * 1 - my own MAC station address
                    579:         * 2 - broadcast address
                    580:         */
                    581:        CSR_WRITE(sc, AVEAFEN, 0); /* clear all 17 entries first */
                    582:        ave_write_filt(sc, 0, promisc);
                    583:        ave_write_filt(sc, 1, CLLADDR(ifp->if_sadl));
                    584:        ave_write_filt(sc, 2, etherbroadcastaddr);
                    585:
                    586:        /* accept multicast frame or run promisc mode */
                    587:        ave_set_rcvfilt(sc);
                    588:
1.16      nisimura  589:        (void)ether_mediachange(ifp);
1.9       nisimura  590:
1.1       nisimura  591:        csr = CSR_READ(sc, AVECFG);
                    592:        if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
                    593:                sc->sc_t0csum = 0;
                    594:                csr |= (CFG_CKE | CFG_IPFCKE);
                    595:        } else
                    596:                sc->sc_t0csum = T0_NOCSUM;
                    597:        if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
                    598:                csr |= (CFG_CKE | CFG_IPFCKE);
                    599:        CSR_WRITE(sc, AVECFG, csr);
                    600:
                    601:        sc->sc_rxc = 1518 | RXC_AFE | RXC_DRPEN;
                    602:        CSR_WRITE(sc, AVERXC, sc->sc_rxc | RXC_EN);
                    603:
                    604:        /* activate Tx/Rx descriptor engine */
                    605:        CSR_WRITE(sc, AVEDESCC, DESCC_TD | DESCC_RD0);
                    606:
                    607:        /* enable Rx ring0 timer */
                    608:        csr = CSR_READ(sc, AVEITIRQC) & 0xffff;
                    609:        CSR_WRITE(sc, AVEITIRQC, csr | ITIRQC_R0E | INTMVAL);
                    610:
                    611:        CSR_WRITE(sc, AVEGIMR, /* PHY interrupt is not maskable */
1.8       nisimura  612:            GISR_TXCI | GISR_RXIT | GISR_RXDROP | GISR_RXOVF | GISR_RXF2L);
1.1       nisimura  613:
                    614:        ifp->if_flags |= IFF_RUNNING;
                    615:        ifp->if_flags &= ~IFF_OACTIVE;
                    616:
                    617:        /* start one second timer */
                    618:        callout_schedule(&sc->sc_tick_ch, hz);
                    619:
                    620:        return 0;
                    621: }
                    622:
                    623: static void
                    624: ave_stop(struct ifnet *ifp, int disable)
                    625: {
                    626:        struct ave_softc *sc = ifp->if_softc;
                    627:
                    628:        /* Stop the one second clock. */
                    629:        callout_stop(&sc->sc_tick_ch);
                    630:
                    631:        /* Down the MII. */
                    632:        mii_down(&sc->sc_mii);
                    633:
                    634:        /* Mark the interface down and cancel the watchdog timer. */
                    635:        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
                    636:        ifp->if_timer = 0;
                    637: }
                    638:
                    639: static void
                    640: ave_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
                    641: {
                    642:        struct ave_softc *sc = ifp->if_softc;
                    643:        struct mii_data *mii = &sc->sc_mii;
                    644:
                    645:        mii_pollstat(mii);
                    646:        ifmr->ifm_status = mii->mii_media_status;
                    647:        ifmr->ifm_active = sc->sc_flowflags |
                    648:            (mii->mii_media_active & ~IFM_ETH_FMASK);
                    649: }
                    650:
                    651: void
                    652: mii_statchg(struct ifnet *ifp)
                    653: {
                    654:        struct ave_softc *sc = ifp->if_softc;
                    655:        struct mii_data *mii = &sc->sc_mii;
1.16      nisimura  656:        struct ifmedia *ifm = &mii->mii_media;
                    657:        uint32_t txcr, rxcr, lsel;
1.1       nisimura  658:
1.9       nisimura  659:        /* Get flow control negotiation result. */
1.1       nisimura  660:        if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
                    661:            (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
                    662:                sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
                    663:
                    664:        txcr = CSR_READ(sc, AVETXC);
                    665:        rxcr = CSR_READ(sc, AVERXC);
                    666:        CSR_WRITE(sc, AVERXC, rxcr &~ RXC_EN); /* stop Rx first */
1.7       nisimura  667:
1.16      nisimura  668:        /* Adjust speed 1000/100/10. */
                    669:        txcr &= ~(TXC_SPD1000 | TXC_SPD100);
                    670:        if ((sc->sc_100mii == 0) /* RGMII model */
                    671:             && IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_1000_T)
                    672:                txcr |= TXC_SPD1000;
                    673:        else if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
                    674:                txcr |= TXC_SPD100;
                    675:
                    676:        /* Adjust LINKSEL when RMII/MII too. */
                    677:        if (sc->sc_100mii) {
                    678:                lsel = CSR_READ(sc, AVELINKSEL) &~ LINKSEL_SPD100;
                    679:                if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
                    680:                        lsel |= LINKSEL_SPD100;
                    681:                CSR_WRITE(sc, AVELINKSEL, lsel);
                    682:        }
                    683:
                    684:        /* Adjust duplexity and 802.3x PAUSE flow control. */
1.9       nisimura  685:        txcr &= ~TXC_FCE;
1.16      nisimura  686:        rxcr &= ~(RXC_FCE | RXC_USEFDX);
1.9       nisimura  687:        if (mii->mii_media_active & IFM_FDX) {
                    688:                if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
                    689:                        txcr |= TXC_FCE;
                    690:                if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
1.16      nisimura  691:                        rxcr |= RXC_FCE | RXC_USEFDX;
1.7       nisimura  692:        }
                    693:
1.1       nisimura  694:        sc->sc_rxc = rxcr;
                    695:        CSR_WRITE(sc, AVETXC, txcr);
1.7       nisimura  696:        CSR_WRITE(sc, AVERXC, rxcr | RXC_EN);
1.1       nisimura  697:
                    698: printf("%ctxfe, %crxfe\n",
                    699:        (txcr & TXC_FCE) ? '+' : '-', (rxcr & RXC_FCE) ? '+' : '-');
                    700: }
                    701:
                    702: static void
                    703: lnkchg(struct ave_softc *sc)
                    704: {
                    705:        struct ifmediareq ifmr;
                    706:
                    707:        ave_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
                    708: }
                    709:
                    710: static void
                    711: phy_tick(void *arg)
                    712: {
                    713:        struct ave_softc *sc = arg;
                    714:        struct mii_data *mii = &sc->sc_mii;
                    715:        int s;
                    716:
                    717:        s = splnet();
                    718:        mii_tick(mii);
                    719:        splx(s);
                    720:
                    721:        callout_schedule(&sc->sc_tick_ch, hz);
                    722: }
                    723:
                    724: static int
                    725: mii_readreg(device_t self, int phy, int reg, uint16_t *val)
                    726: {
                    727:        struct ave_softc *sc = device_private(self);
                    728:        uint32_t ctrl, stat;
                    729:
                    730:        CSR_WRITE(sc, AVEMDADR, reg | (sc->sc_phy_id << 8));
                    731:        ctrl = CSR_READ(sc, AVEMDIOC) & ~MDIOC_WR;
                    732:        CSR_WRITE(sc, AVEMDIOC, ctrl | MDIOC_RD);
                    733:        stat = CSR_READ(sc, AVEMDIOS);
                    734:        while (stat & MDIOS_BUSY) {
                    735:                DELAY(10);
                    736:                stat = CSR_READ(sc, AVEMDIOS);
                    737:        }
                    738:        *val = CSR_READ(sc, AVEMDRDD);
                    739:        return 0;
                    740: }
                    741:
                    742: static int
                    743: mii_writereg(device_t self, int phy, int reg, uint16_t val)
                    744: {
                    745:        struct ave_softc *sc = device_private(self);
                    746:        uint32_t ctrl, stat;
                    747:
                    748:        CSR_WRITE(sc, AVEMDADR, reg | (sc->sc_phy_id << 8));
                    749:        CSR_WRITE(sc, AVEMDWRD, val);
                    750:        ctrl = CSR_READ(sc, AVEMDIOC) & ~MDIOC_RD;
                    751:        CSR_WRITE(sc, AVEMDIOC, ctrl | MDIOC_WR);
                    752:        stat = CSR_READ(sc, AVEMDIOS);
                    753:        while (stat & MDIOS_BUSY) {
                    754:                DELAY(10);
                    755:                stat = CSR_READ(sc, AVEMDIOS);
                    756:        }
                    757:        return 0;
                    758: }
                    759:
                    760: static int
                    761: ave_ioctl(struct ifnet *ifp, u_long cmd, void *data)
                    762: {
                    763:        struct ave_softc *sc = ifp->if_softc;
                    764:        struct ifreq *ifr = (struct ifreq *)data;
                    765:        struct ifmedia *ifm;
                    766:        int s, error;
                    767:
                    768:        s = splnet();
                    769:
                    770:        switch (cmd) {
                    771:        case SIOCSIFMEDIA:
                    772:                /* Flow control requires full-duplex mode. */
                    773:                if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
                    774:                    (ifr->ifr_media & IFM_FDX) == 0)
                    775:                        ifr->ifr_media &= ~IFM_ETH_FMASK;
                    776:                if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
                    777:                        if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
                    778:                                /* We can do both TXPAUSE and RXPAUSE. */
                    779:                                ifr->ifr_media |=
                    780:                                    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
                    781:                        }
                    782:                        sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
                    783:                }
                    784:                ifm = &sc->sc_mii.mii_media;
                    785:                error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
                    786:                break;
                    787:        default:
                    788:                if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
                    789:                        break;
                    790:
                    791:                error = 0;
                    792:
                    793:                if (cmd == SIOCSIFCAP)
                    794:                        error = (*ifp->if_init)(ifp);
                    795:                if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
                    796:                        ;
                    797:                else if (ifp->if_flags & IFF_RUNNING) {
                    798:                        /*
                    799:                         * Multicast list has changed; set the hardware filter
                    800:                         * accordingly.
                    801:                         */
                    802:                        ave_set_rcvfilt(sc);
                    803:                }
                    804:                break;
                    805:        }
                    806:
                    807:        splx(s);
                    808:        return error;
                    809: }
                    810:
                    811: static void
                    812: ave_write_filt(struct ave_softc *sc, int i, const uint8_t *en)
                    813: {
1.7       nisimura  814:        uint32_t macl, mach, n, mskbyte0;
1.1       nisimura  815:
1.7       nisimura  816:        /* pick v4mcast or v6mcast length */
                    817:        n = (en[0] == 0x01) ? 3 : (en[0] == 0x33) ? 2 : ETHER_ADDR_LEN;
1.9       nisimura  818:        /* slot 0 is reserved for promisc mode */
1.7       nisimura  819:        mskbyte0 = (i > 0) ? genmask0(n) : MSKBYTE0;
                    820:
1.1       nisimura  821:        /* set frame address first */
1.8       nisimura  822:        macl = mach = 0;
                    823:        macl |= (en[3]<<24) | (en[2]<<16)| (en[1]<<8) | en[0];
                    824:        mach |= (en[5]<<8)  | en[4];
1.1       nisimura  825:        CSR_WRITE(sc, AVEAFB + (i * 0x40) + 0, macl);
                    826:        CSR_WRITE(sc, AVEAFB + (i * 0x40) + 4, mach);
                    827:        /* set byte mask according to mask length, any of 6, 3, or 2 */
1.7       nisimura  828:        CSR_WRITE(sc, AVEAFMSKB + (i * 8) + 0, mskbyte0);
1.1       nisimura  829:        CSR_WRITE(sc, AVEAFMSKB + (i * 8) + 4, MSKBYTE1);
                    830:        /* set bit vector mask */
                    831:        CSR_WRITE(sc, AVEAFMSKV + (i * 4), 0xffff);
1.7       nisimura  832:        /* use Rx ring 0 anyway */
1.1       nisimura  833:        CSR_WRITE(sc, AVEAFRING + (i * 4), 0);
                    834:        /* filter entry enable bit vector */
                    835:        CSR_WRITE(sc, AVEAFEN, CSR_READ(sc, AVEAFEN) | 1U << i);
                    836: }
                    837:
                    838: static void
                    839: ave_set_rcvfilt(struct ave_softc *sc)
                    840: {
                    841:        struct ethercom *ec = &sc->sc_ethercom;
                    842:        struct ifnet *ifp = &ec->ec_if;
                    843:        struct ether_multistep step;
                    844:        struct ether_multi *enm;
                    845:        extern const uint8_t ether_ipmulticast_min[];
                    846:        extern const uint8_t ether_ip6multicast_min[];
                    847:        uint32_t csr;
                    848:        int i;
                    849:
                    850:        sc->sc_rxc &= (RXC_AFE | RXC_EN);
                    851:        CSR_WRITE(sc, AVERXC, sc->sc_rxc); /* stop Rx first */
                    852:
                    853:        /* turn off all 7 mcast filter entries */
                    854:        csr = CSR_READ(sc, AVEAFEN);
                    855:        CSR_WRITE(sc, AVEAFEN, csr & ~(0177U << 11));
                    856:
                    857:        ETHER_LOCK(ec);
                    858:        if ((ifp->if_flags & IFF_PROMISC) || ec->ec_multicnt > 7) {
                    859:                ec->ec_flags |= ETHER_F_ALLMULTI;
                    860:                ETHER_UNLOCK(ec);
                    861:                goto update;
                    862:        }
                    863:        ec->ec_flags &= ~ETHER_F_ALLMULTI;
                    864:        ETHER_FIRST_MULTI(step, ec, enm);
1.13      nisimura  865:        i = 11; /* slot 11-17 to catch multicast frames */
1.1       nisimura  866:        while (enm != NULL) {
                    867:                if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
                    868:                        /*
                    869:                         * We must listen to a range of multicast addresses.
                    870:                         * For now, just accept all multicasts, rather than
                    871:                         * trying to set only those filter bits needed to match
                    872:                         * the range.  (At this time, the only use of address
                    873:                         * ranges is for IP multicast routing, for which the
                    874:                         * range is big enough to require all bits set.)
                    875:                         */
                    876:                        ec->ec_flags |= ETHER_F_ALLMULTI;
                    877:                        ETHER_UNLOCK(ec);
                    878:                        goto update;
                    879:                }
1.13      nisimura  880: printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
1.1       nisimura  881:                KASSERT(i < 17);
                    882:                /* use additional MAC addr to accept up to 7 */
                    883:                ave_write_filt(sc, i, enm->enm_addrlo);
                    884:                ETHER_NEXT_MULTI(step, enm);
                    885:                i++;
                    886:        }
                    887:        ETHER_UNLOCK(ec);
                    888:        sc->sc_rxc |= RXC_AFE;
1.13      nisimura  889:        CSR_WRITE(sc, AVERXC, sc->sc_rxc | RXC_EN);
                    890:        return;
1.1       nisimura  891:
                    892:  update:
                    893:        if (ifp->if_flags & IFF_PROMISC)
                    894:                /* RXC_AFE has been cleared, nothing to do */;
1.13      nisimura  895:        else {
                    896:                /* slot 11,12 for IPv4/v6 multicast */
1.1       nisimura  897:                ave_write_filt(sc, 11, ether_ipmulticast_min);
                    898:                ave_write_filt(sc, 12, ether_ip6multicast_min); /* INET6 */
                    899:                /* clear slot 13-17 */
                    900:                csr = CSR_READ(sc, AVEAFEN);
                    901:                CSR_WRITE(sc, AVEAFEN, csr & ~(037U << 13));
                    902:                sc->sc_rxc |= RXC_AFE;
                    903:        }
                    904:        CSR_WRITE(sc, AVERXC, sc->sc_rxc | RXC_EN);
1.13      nisimura  905:        return;
1.1       nisimura  906: }
                    907:
                    908: static void
                    909: ave_watchdog(struct ifnet *ifp)
                    910: {
                    911:        struct ave_softc *sc = ifp->if_softc;
                    912:
                    913:        /*
                    914:         * Since we're not interrupting every packet, sweep
                    915:         * up before we report an error.
                    916:         */
                    917:        txreap(sc);
                    918:
1.13      nisimura  919:        if (sc->sc_txfree != MD_NTXDESC) {
1.1       nisimura  920:                aprint_error_dev(sc->sc_dev,
                    921:                    "device timeout (txfree %d txsfree %d txnext %d)\n",
                    922:                    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
                    923:                if_statinc(ifp, if_oerrors);
                    924:
                    925:                /* Reset the interface. */
                    926:                ave_init(ifp);
                    927:        }
                    928:
                    929:        ave_start(ifp);
                    930: }
                    931:
                    932: static void
                    933: ave_start(struct ifnet *ifp)
                    934: {
                    935:        struct ave_softc *sc = ifp->if_softc;
                    936:        struct mbuf *m0, *m;
                    937:        struct ave_txsoft *txs;
                    938:        bus_dmamap_t dmamap;
                    939:        int error, nexttx, lasttx, ofree, seg;
                    940:        uint32_t tdes0;
                    941:
                    942:        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
                    943:                return;
                    944:
                    945:        /* Remember the previous number of free descriptors. */
                    946:        ofree = sc->sc_txfree;
                    947:
                    948:        /*
                    949:         * Loop through the send queue, setting up transmit descriptors
                    950:         * until we drain the queue, or use up all available transmit
                    951:         * descriptors.
                    952:         */
                    953:        for (;;) {
                    954:                IFQ_POLL(&ifp->if_snd, m0);
                    955:                if (m0 == NULL)
                    956:                        break;
                    957:
1.13      nisimura  958:                if (sc->sc_txsfree < MD_TXQUEUE_GC) {
1.1       nisimura  959:                        txreap(sc);
                    960:                        if (sc->sc_txsfree == 0)
                    961:                                break;
                    962:                }
                    963:                txs = &sc->sc_txsoft[sc->sc_txsnext];
                    964:                dmamap = txs->txs_dmamap;
                    965:
                    966:                error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
                    967:                    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
                    968:                if (error) {
                    969:                        if (error == EFBIG) {
                    970:                                aprint_error_dev(sc->sc_dev,
                    971:                                    "Tx packet consumes too many "
                    972:                                    "DMA segments, dropping...\n");
                    973:                                IFQ_DEQUEUE(&ifp->if_snd, m0);
                    974:                                m_freem(m0);
                    975:                                continue;
                    976:                        }
                    977:                        /* Short on resources, just stop for now. */
                    978:                        break;
                    979:                }
                    980:
                    981:                if (dmamap->dm_nsegs > sc->sc_txfree) {
                    982:                        /*
                    983:                         * Not enough free descriptors to transmit this
                    984:                         * packet.  We haven't committed anything yet,
                    985:                         * so just unload the DMA map, put the packet
                    986:                         * back on the queue, and punt.  Notify the upper
                    987:                         * layer that there are no more slots left.
                    988:                         */
                    989:                        ifp->if_flags |= IFF_OACTIVE;
                    990:                        bus_dmamap_unload(sc->sc_dmat, dmamap);
                    991:                        break;
                    992:                }
                    993:
                    994:                IFQ_DEQUEUE(&ifp->if_snd, m0);
                    995:
                    996:                /*
                    997:                 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
                    998:                 */
                    999:
                   1000:                bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
                   1001:                    BUS_DMASYNC_PREWRITE);
                   1002:
                   1003:                tdes0 = 0; /* to postpone 1st segment T0_OWN write */
                   1004:                lasttx = -1;
                   1005:                for (nexttx = sc->sc_txnext, seg = 0;
                   1006:                     seg < dmamap->dm_nsegs;
1.13      nisimura 1007:                     seg++, nexttx = MD_NEXTTX(nexttx)) {
1.14      nisimura 1008:                        MAKE_TDES(sc, nexttx, seg, tdes0 | sc->sc_t0csum);
1.1       nisimura 1009:                        /*
                   1010:                         * If this is the first descriptor we're
                   1011:                         * enqueueing, don't set the OWN bit just
                   1012:                         * yet.  That could cause a race condition.
                   1013:                         * We'll do it below.
                   1014:                         */
                   1015:                        tdes0 = T0_OWN; /* 2nd and other segments */
                   1016:                        lasttx = nexttx;
                   1017:                }
                   1018:                /*
                    1019:                 * An outgoing NFS mbuf must be freed as soon as its Tx completes.
                    1020:                 * Without T0_IOC the mbuf is left unack'ed for an excessive time
                    1021:                 * and NFS makes no progress until ave_watchdog() calls txreap()
                    1022:                 * to reclaim it.  Since it is painful to traverse every mbuf
                    1023:                 * chain to see whether someone is waiting for Tx completion,
                    1024:                 * mark T0_IOC whenever a chain carries externally-owned storage.
                   1025:                 */
                   1026:                m = m0;
                   1027:                do {
                   1028:                        if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
1.14      nisimura 1029:                                MARK_TXIC(sc, lasttx);
1.1       nisimura 1030:                                break;
                   1031:                        }
                   1032:                } while ((m = m->m_next) != NULL);
                   1033:
                   1034:                /* Write deferred 1st segment T0_OWN at the final stage */
1.14      nisimura 1035:                MARK_TXLS(sc, lasttx);
                   1036:                MARK_TXFS(sc, sc->sc_txnext);
1.1       nisimura 1037:                /* AVE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
                   1038:                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); */
                   1039:
                    1040:                /* Tell the DMA engine to start transmitting */
                   1041:                /* CSR_WRITE(sc, AVEDESCC, DESCC_TD | DESCC_RD0); */
                   1042:
                   1043:                txs->txs_mbuf = m0;
                   1044:                txs->txs_firstdesc = sc->sc_txnext;
                   1045:                txs->txs_lastdesc = lasttx;
                   1046:                txs->txs_ndesc = dmamap->dm_nsegs;
                   1047:
                   1048:                sc->sc_txfree -= txs->txs_ndesc;
                   1049:                sc->sc_txnext = nexttx;
                   1050:                sc->sc_txsfree--;
1.13      nisimura 1051:                sc->sc_txsnext = MD_NEXTTXS(sc->sc_txsnext);
1.1       nisimura 1052:                /*
                   1053:                 * Pass the packet to any BPF listeners.
                   1054:                 */
                   1055:                bpf_mtap(ifp, m0, BPF_D_OUT);
                   1056:        }
                   1057:
                   1058:        if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
                   1059:                /* No more slots left; notify upper layer. */
                   1060:                ifp->if_flags |= IFF_OACTIVE;
                   1061:        }
                   1062:        if (sc->sc_txfree != ofree) {
                   1063:                /* Set a watchdog timer in case the chip flakes out. */
                   1064:                ifp->if_timer = 5;
                   1065:        }
                   1066: }
                   1067:
                   1068: static int
                   1069: ave_intr(void *arg)
                   1070: {
                   1071:        struct ave_softc *sc = arg;
                   1072:        uint32_t gimr, stat;
                   1073:
                   1074:        gimr = CSR_READ(sc, AVEGIMR);
                   1075:        CSR_WRITE(sc, AVEGIMR, 0);
                   1076:        stat = CSR_READ(sc, AVEGISR);
                   1077:        if (stat == 0)
                   1078:                goto done;
                   1079:        if (stat & GISR_PHY) {
                   1080:                lnkchg(sc);
                   1081:                CSR_WRITE(sc, AVEGISR, GISR_PHY);
                   1082:        }
                    1083:        stat &= gimr;   /* only interrupts that were enabled at entry */
                   1084:        if (stat == 0)
                   1085:                goto done;
                   1086:        if (stat & GISR_RXDROP)
                   1087:                CSR_WRITE(sc, AVEGISR, GISR_RXDROP);
                   1088:        if (stat & GISR_RXOVF)
                   1089:                CSR_WRITE(sc, AVEGISR, GISR_RXOVF);
1.8       nisimura 1090:        if (stat & GISR_RXF2L)
                   1091:                CSR_WRITE(sc, AVEGISR, GISR_RXF2L);
1.1       nisimura 1092:        if (stat & GISR_RXIT) {
                   1093:                rxintr(sc);
                   1094:                CSR_WRITE(sc, AVEGISR, GISR_RXIT);
                   1095:        }
                   1096:        if (stat & GISR_TXCI) {
                   1097:                txreap(sc);
                   1098:                CSR_WRITE(sc, AVEGISR, GISR_TXCI);
                   1099:        }
                   1100:  done:
                   1101:        CSR_WRITE(sc, AVEGIMR, gimr);
                   1102:        return (stat != 0);
                   1103: }
                   1104:
                   1105: static void
                   1106: txreap(struct ave_softc *sc)
                   1107: {
                   1108:        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
                   1109:        struct ave_txsoft *txs;
                   1110:        uint32_t txstat;
                   1111:        int i;
                   1112:
                   1113:        ifp->if_flags &= ~IFF_OACTIVE;
                   1114:
1.13      nisimura 1115:        for (i = sc->sc_txsdirty; sc->sc_txsfree != MD_TXQUEUELEN;
                   1116:             i = MD_NEXTTXS(i), sc->sc_txsfree++) {
1.1       nisimura 1117:                txs = &sc->sc_txsoft[i];
                   1118:
                   1119:                /* AVE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
                   1120:                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); */
                   1121:
1.14      nisimura 1122:                txstat = READ_TDES0(sc, txs->txs_lastdesc);
1.1       nisimura 1123:
                   1124:                if (txstat & T0_OWN) /* desc is still in use */
                   1125:                        break;
                   1126:                /*
                    1127:                 * XXX statistics could be gathered here XXX
                    1128:                 * T0_DONEOK -- completed ok
                    1129:                 * T0_OWC    -- out of window or collision
                    1130:                 * T0_ECOL   -- dropped due to excessive collisions
                   1131:                 */
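                /*
                 * A minimal sketch of such accounting (illustrative only,
                 * kept under #if 0 and not part of the driver), assuming
                 * T0_DONEOK, T0_OWC and T0_ECOL are individual status bits
                 * in tdes0:
                 */
#if 0
                if (txstat & (T0_OWC | T0_ECOL))
                        if_statinc(ifp, if_collisions);
                if ((txstat & T0_DONEOK) == 0)
                        if_statinc(ifp, if_oerrors);
#endif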
                   1132:                if_statinc(ifp, if_opackets);
                   1133:
                   1134:                sc->sc_txfree += txs->txs_ndesc;
                   1135:                bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
                   1136:                    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
                   1137:                bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
                   1138:                m_freem(txs->txs_mbuf);
                   1139:                txs->txs_mbuf = NULL;
                   1140:        }
                   1141:        sc->sc_txsdirty = i;
1.13      nisimura 1142:        if (sc->sc_txsfree == MD_TXQUEUELEN)
1.1       nisimura 1143:                ifp->if_timer = 0;
                   1144: }
                   1145:
                   1146: static void
                   1147: rxintr(struct ave_softc *sc)
                   1148: {
                   1149:        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
                   1150:        struct ave_rxsoft *rxs;
                   1151:        struct mbuf *m;
                   1152:        uint32_t rxstat;
                   1153:        int i, len;
                   1154:
1.13      nisimura 1155:        for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = MD_NEXTRX(i)) {
1.1       nisimura 1156:                rxs = &sc->sc_rxsoft[i];
                   1157:
                   1158:                /* AVE_CDRXSYNC(sc, i,
                   1159:                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); */
                   1160:
1.14      nisimura 1161:                rxstat = READ_RDES0(sc, i);
1.1       nisimura 1162:                if (rxstat & R0_OWN) /* desc is left empty */
                   1163:                        break;
                   1164:
                   1165:                /* R0_FS | R0_LS must have been marked for this desc */
                   1166:
                   1167:                bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
                   1168:                    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
                   1169:
                   1170:                len = rxstat & R0_FL_MASK;
                   1171:                len -= ETHER_CRC_LEN;   /* Trim CRC off */
                   1172:                m = rxs->rxs_mbuf;
                   1173:
                   1174:                if (add_rxbuf(sc, i) != 0) {
                   1175:                        if_statinc(ifp, if_ierrors);
1.14      nisimura 1176:                        INIT_RDES(sc, i);
1.1       nisimura 1177:                        bus_dmamap_sync(sc->sc_dmat,
                   1178:                            rxs->rxs_dmamap, 0,
                   1179:                            rxs->rxs_dmamap->dm_mapsize,
                   1180:                            BUS_DMASYNC_PREREAD);
                   1181:                        continue;
                   1182:                }
                   1183:
                   1184:                m_set_rcvif(m, ifp);
                   1185:                m->m_pkthdr.len = m->m_len = len;
                   1186:
                   1187:                if (rxstat & R0_CSUM) {
                   1188:                        uint32_t csum = M_CSUM_IPv4;
                   1189:                        if (rxstat & R0_CERR)
                   1190:                                csum |= M_CSUM_IPv4_BAD;
                   1191:                        m->m_pkthdr.csum_flags |= csum;
                   1192:                }
                   1193:                if_percpuq_enqueue(ifp->if_percpuq, m);
                   1194:        }
                   1195:        sc->sc_rxptr = i;
                   1196: }
                   1197:
                   1198: static int
                   1199: add_rxbuf(struct ave_softc *sc, int i)
                   1200: {
                   1201:        struct ave_rxsoft *rxs = &sc->sc_rxsoft[i];
                   1202:        struct mbuf *m;
                   1203:        int error;
                   1204:
                   1205:        MGETHDR(m, M_DONTWAIT, MT_DATA);
                   1206:        if (m == NULL)
                   1207:                return ENOBUFS;
                   1208:
                   1209:        MCLGET(m, M_DONTWAIT);
                   1210:        if ((m->m_flags & M_EXT) == 0) {
                   1211:                m_freem(m);
                   1212:                return ENOBUFS;
                   1213:        }
                   1214:
                   1215:        if (rxs->rxs_mbuf != NULL)
                   1216:                bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
                   1217:
                   1218:        rxs->rxs_mbuf = m;
                   1219:
                   1220:        error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
                   1221:            m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
                   1222:        if (error) {
                   1223:                aprint_error_dev(sc->sc_dev,
                   1224:                    "can't load rx DMA map %d, error = %d\n", i, error);
                   1225:                panic("add_rxbuf");
                   1226:        }
                   1227:
                   1228:        bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
                   1229:            rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1.14      nisimura 1230:        INIT_RDES(sc, i);
1.1       nisimura 1231:
                   1232:        return 0;
                   1233: }
1.14      nisimura 1234:
                   1235: /* AVE64 descriptor management ops */
                   1236:
                   1237: static void make_tdes(void *cookie, int x, int seg, int tdes0)
                   1238: {
                   1239:        struct ave_softc *sc = cookie;
                   1240:        struct ave_txsoft *txs = &sc->sc_txsoft[x];
                   1241:        struct tdes *txd = &sc->sc_txdescs[x];
                   1242:        bus_addr_t p = txs->txs_dmamap->dm_segs[seg].ds_addr;
                   1243:        bus_size_t z = txs->txs_dmamap->dm_segs[seg].ds_len;
                   1244:
                   1245:        txd->t2 = htole32(BUS_ADDR_HI32(p));
                   1246:        txd->t1 = htole32(BUS_ADDR_LO32(p));
                   1247:        txd->t0 = tdes0 | (z & T0_TBS_MASK);
                   1248: }
                   1249:
                   1250: static void mark_txfs(void *cookie, int x)
                   1251: {
                   1252:        struct ave_softc *sc = cookie;
                   1253:        struct tdes *txd = &sc->sc_txdescs[x];
                   1254:        txd->t0 |= (T0_FS | T0_OWN);
                   1255: }
                   1256:
                   1257: static void mark_txls(void *cookie, int x)
                   1258: {
                   1259:        struct ave_softc *sc = cookie;
                   1260:        struct tdes *txd = &sc->sc_txdescs[x];
                   1261:        txd->t0 |= T0_LS;
                   1262: }
                   1263:
                   1264: static void mark_txic(void *cookie, int x)
                   1265: {
                   1266:        struct ave_softc *sc = cookie;
                   1267:        struct tdes *txd = &sc->sc_txdescs[x];
                   1268:        txd->t0 |= T0_IOC;
                   1269: }
                   1270:
                   1271: static int read_tdes0(void *cookie, int x)
                   1272: {
                   1273:        struct ave_softc *sc = cookie;
                   1274:        struct tdes *txd = &sc->sc_txdescs[x];
                   1275:        return txd->t0;
                   1276: }
                   1277:
                   1278: static int read_rdes0(void *cookie, int x)
                   1279: {
                   1280:        struct ave_softc *sc = cookie;
                   1281:        struct rdes *rxd = &sc->sc_rxdescs[x];
                   1282:        return rxd->r0;
                   1283: }
                   1284:
                   1285: static void init_rdes(void *cookie, int x)
                   1286: {
                   1287:        struct ave_softc *sc = cookie;
                   1288:        struct ave_rxsoft *rxs = &sc->sc_rxsoft[x];
                   1289:        struct rdes *rxd = &sc->sc_rxdescs[x];
                   1290:        struct mbuf *m = rxs->rxs_mbuf;
                   1291:        bus_addr_t p = rxs->rxs_dmamap->dm_segs[0].ds_addr;
                   1292:        bus_size_t z = rxs->rxs_dmamap->dm_segs[0].ds_len;
                   1293:
                   1294:        m->m_data = m->m_ext.ext_buf;
                   1295:        rxd->r1 = htole32(BUS_ADDR_LO32(p));
                   1296:        rxd->r0 = R0_OWN | (z & R0_FL_MASK);
                   1297: }
                   1298:
                   1299: /* AVE32 descriptor management ops */
                   1300:
                   1301: static void omake_tdes(void *cookie, int x, int seg, int tdes0)
                   1302: {
                   1303:        struct ave_softc *sc = cookie;
                   1304:        struct ave_txsoft *txs = &sc->sc_txsoft[x];
                   1305:        struct tdes32 *txd = &sc->sc_txd32[x];
                   1306:        bus_addr_t p = txs->txs_dmamap->dm_segs[seg].ds_addr;
                   1307:        bus_size_t z = txs->txs_dmamap->dm_segs[seg].ds_len;
                   1308:
                   1309:        txd->t1 = htole32(BUS_ADDR_LO32(p));
                   1310:        txd->t0 = tdes0 | (z & T0_TBS_MASK);
                   1311: }
                   1312:
                   1313: static void omark_txfs(void *cookie, int x)
                   1314: {
                   1315:        struct ave_softc *sc = cookie;
                   1316:        struct tdes32 *txd = &sc->sc_txd32[x];
                   1317:        txd->t0 |= (T0_FS | T0_OWN);
                   1318: }
                   1319:
                   1320: static void omark_txls(void *cookie, int x)
                   1321: {
                   1322:        struct ave_softc *sc = cookie;
                   1323:        struct tdes32 *txd = &sc->sc_txd32[x];
                   1324:        txd->t0 |= T0_LS;
                   1325: }
                   1326:
                   1327: static void omark_txic(void *cookie, int x)
                   1328: {
                   1329:        struct ave_softc *sc = cookie;
                   1330:        struct tdes32 *txd = &sc->sc_txd32[x];
                    1331:        txd->t0 |= T0_IOC;
                   1332: }
                   1333:
                   1334: static int oread_tdes0(void *cookie, int x)
                   1335: {
                   1336:        struct ave_softc *sc = cookie;
                   1337:        struct tdes32 *txd = &sc->sc_txd32[x];
                   1338:        return txd->t0;
                   1339: }
                   1340:
                   1341: static int oread_rdes0(void *cookie, int x)
                   1342: {
                   1343:        struct ave_softc *sc = cookie;
                   1344:        struct rdes32 *rxd = &sc->sc_rxd32[x];
                   1345:        return rxd->r0;
                   1346: }
                   1347:
                   1348: static void oinit_rdes(void *cookie, int x)
                   1349: {
                   1350:        struct ave_softc *sc = cookie;
                   1351:        struct ave_rxsoft *rxs = &sc->sc_rxsoft[x];
                   1352:        struct rdes32 *rxd = &sc->sc_rxd32[x];
                   1353:        struct mbuf *m = rxs->rxs_mbuf;
                   1354:        bus_addr_t p = rxs->rxs_dmamap->dm_segs[0].ds_addr;
                   1355:        bus_size_t z = rxs->rxs_dmamap->dm_segs[0].ds_len;
                   1356:
                   1357:        m->m_data = m->m_ext.ext_buf;
                   1358:        rxd->r1 = htole32(BUS_ADDR_LO32(p));
                   1359:        rxd->r0 = R0_OWN | (z & R0_FL_MASK);
                   1360: }
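
/*
 * Illustrative sketch only (not part of the driver): one plausible way
 * the MAKE_TDES()/MARK_TX*()/READ_*DES0()/INIT_RDES() macros used above
 * could dispatch to either descriptor flavour through a per-softc ops
 * vector.  "struct ave_desc_ops" and the two tables are hypothetical
 * names introduced here for illustration.
 */
#if 0
struct ave_desc_ops {
	void (*make_tdes)(void *, int, int, int);
	void (*mark_txfs)(void *, int);
	void (*mark_txls)(void *, int);
	void (*mark_txic)(void *, int);
	int  (*read_tdes0)(void *, int);
	int  (*read_rdes0)(void *, int);
	void (*init_rdes)(void *, int);
};

static const struct ave_desc_ops ave64_desc_ops = {
	make_tdes,  mark_txfs,  mark_txls,  mark_txic,
	read_tdes0, read_rdes0, init_rdes,
};

static const struct ave_desc_ops ave32_desc_ops = {
	omake_tdes,  omark_txfs,  omark_txls,  omark_txic,
	oread_tdes0, oread_rdes0, oinit_rdes,
};
#endif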
