Annotation of src/sys/arch/arm/sociox/if_ave.c, Revision 1.7
1.7 ! nisimura 1: /* $NetBSD: if_ave.c,v 1.6 2020/03/21 07:16:16 nisimura Exp $ */
1.1 nisimura 2:
3: /*-
4: * Copyright (c) 2020 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Tohru Nishimura.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
1.6 nisimura 33: * Socionext UniPhier AVE GbE driver
34: *
35: * There are two groups for 64bit paddr model and 32bit paddr.
1.1 nisimura 36: */
37:
38: #include <sys/cdefs.h>
1.7 ! nisimura 39: __KERNEL_RCSID(0, "$NetBSD: if_ave.c,v 1.6 2020/03/21 07:16:16 nisimura Exp $");
1.1 nisimura 40:
41: #include <sys/param.h>
42: #include <sys/bus.h>
43: #include <sys/intr.h>
44: #include <sys/device.h>
45: #include <sys/callout.h>
46: #include <sys/mbuf.h>
47: #include <sys/malloc.h>
48: #include <sys/errno.h>
49: #include <sys/rndsource.h>
50: #include <sys/kernel.h>
51: #include <sys/systm.h>
52:
53: #include <net/if.h>
54: #include <net/if_media.h>
55: #include <net/if_dl.h>
56: #include <net/if_ether.h>
57: #include <dev/mii/mii.h>
58: #include <dev/mii/miivar.h>
59: #include <net/bpf.h>
60:
61: #include <dev/fdt/fdtvar.h>
62:
63: #define FDT_INTR_FLAGS (0) /* !MP_SAFE */
64:
65: #define AVEID 0x000 /* hardware ID */
66: #define AVEHWVER 0x004 /* hardware version */
67: #define AVEGR 0x008 /* chip global control */
68: #define GR_RXRST (1U<<5) /* RxFIFO reset */
69: #define GR_PHYRST (1U<<4) /* external PHY reset */
70: #define GR_GRST (1U<<0) /* full chip reset */
71: #define AVECFG 0x00c /* hw configuration */
72: #define CFG_FLE (1U<<31) /* filter function enable */
73: #define CFG_CKE (1U<<30) /* checksum enable */
74: #define CFG_MII (1U<<27) /* 1: MII/RMII, 0: RGMII */
75: #define CFG_IPFCKE (1U<<24) /* IP framgment csum enable */
76: #define AVEGIMR 0x100 /* global interrupt mask */
77: #define AVEGISR 0x104 /* global interrupt status */
78: #define GISR_PHY (1U<<24) /* PHY status change detected */
79: #define GISR_TXCI (1U<<16) /* transmission completed */
80: #define GISR_RXERR (1U<<8) /* Rx frame error detected */
81: #define GISR_RXOVF (1U<<7) /* Rx oveflow detected */
82: #define GISR_RXDROP (1U<<6) /* Rx has been dropped */
83: #define GISR_RXIT (1U<<5) /* receive itimer notify */
84: #define AVETXC 0x200 /* transmit control */
85: #define TXC_FCE (1U<<18) /* enable Tx flow control */
86: #define TXC_SPD1000 (1U<<17) /* use 1000Mbps */
87: #define TXC_SPD100 (1U<<16) /* use 100Mbps */
88: #define AVERXC 0x204 /* receive control */
89: #define RXC_EN (1U<<30) /* enable receive circuit */
90: #define RXC_USEFDX (1U<<22) /* use full-duplex */
91: #define RXC_FCE (1U<<21) /* enable Rx flow control */
92: #define RXC_AFE (1U<<19) /* use address filter (!promisc) */
93: #define RXC_DRPEN (1U<<18) /* drop receiving PAUSE frames */
94: /* RXC 15:0 max frame length to accept */
95: #define AVEMACL 0x208 /* MAC address lower */
96: #define AVEMACH 0x20c /* MAC address upper */
97: #define AVEMDIOC 0x214 /* MDIO control */
98: #define MDIOC_RD (1U<<3) /* read op */
99: #define MDIOC_WR (1U<<2) /* write op */
100: #define AVEMDADR 0x218 /* MDIO address -- 13:8 phy id */
101: #define AVEMDWRD 0x21c /* MDIO write data - 15:0 */
102: #define AVEMDIOS 0x220 /* MDIO status */
103: #define MDIOS_BUSY (1U<<0) /* MDIO in progress */
104: #define AVEMDRDD 0x224 /* MDIO read data */
105: #define AVEDESCC 0x300 /* descriptor control */
106: #define DESCC_RD0 (1U<<3) /* activate Rx0 descriptor to run */
107: #define DESCC_RSTP (1U<<2) /* pause Rx descriptor */
108: #define DESCC_TD (1U<<0) /* activate Tx descriptor to run */
109: /* 31:16 status report to read */
110: #define AVETXDES 0x304 /* Tx descriptor control */
111: /* 27:16 Tx descriptor byte count
112: * 11:0 start address offset */
113: #define AVERXDES0 0x308 /* Rx0 descriptor control */
114: /* 30:16 Rx descriptor byte count
115: * 14:0 start address offset */
116: #define AVEITIRQC 0x34c /* interval IRQ control */
117: #define ITIRQC_R0E (1U<<27) /* enable Rx0 interval timer */
118: #define INTMVAL (20<<16) /* INTM value */
119: /* 15:0 interval timer count */
120:
#define AVEAFB		0x0800		/* address filter base */
#define AVEAFMSKB	0x0d00		/* byte mask base */
/*
 * NOTE(review): MSKBYTE0 below is written with 9 hex digits (36 bits);
 * when stored through a 32-bit register write it truncates to
 * 0xffffff3f -- confirm the extra leading 'f' is intentional.
 * (MSKBYTE1 also has 9 digits but its value fits in 32 bits.)
 */
#define MSKBYTE0 0xfffffff3f /* zeros in 7:6 */
#define MSKBYTE1 0x003ffffff /* ones in 25:0 */
/* byte mask for filter entries 1..N: compare only the first x bytes */
#define genmask0(x) (MSKBYTE0 & (~0U << (x)))
#define AVEAFMSKV	0x0e00		/* bit mask base */
#define AVEAFRING	0x0f00		/* entry ring number selector */
#define AVEAFEN		0x0ffc		/* entry enable bit vector */
129:
1.6 nisimura 130: #define AVETDB 0x1000 /* 64bit Tx descriptor store, upto 256 */
131: #define AVERDB 0x1c00 /* 64bit Rx descriptor store, upto 2048 */
132: #define AVE32TDB 0x1000 /* 32bit Tx store base, upto 256 */
133: #define AVE32RDB 0x1800 /* 32bit Rx store base, upto 2048 */
1.1 nisimura 134:
1.7 ! nisimura 135: #define AVERMIIC 0x8028 /* RMII control */
! 136: #define RMIIC_RST (1U<<16) /* reset */
! 137: #define AVELINKSEL 0x8034 /* link speed selection */
! 138: #define LINKSEL_SPD100 (1U<<0) /* RMII speed 100Mbps */
! 139:
1.1 nisimura 140: /*
1.5 nisimura 141: * descriptor size is 12 bytes when 64bit paddr design, 8 bytes otherwise.
1.1 nisimura 142: */
143: struct tdes {
1.4 nisimura 144: uint32_t t0, t1, t2;
1.1 nisimura 145: };
146:
147: struct rdes {
1.4 nisimura 148: uint32_t r0, r1, r2;
1.1 nisimura 149: };
150:
1.5 nisimura 151: struct tdes32 { uint32_t t0, t1; };
152: struct rdes32 { uint32_t r0, r1; };
153:
1.1 nisimura 154: #define T0_OWN (1U<<31) /* desc is ready to Tx */
155: #define T0_IOC (1U<<29) /* post interrupt on Tx completes */
156: #define T0_NOCSUM (1U<<28) /* inhibit checksum operation */
157: #define T0_DONEOK (1U<<27) /* status - Tx completed ok */
158: #define T0_FS (1U<<26) /* first segment of frame */
159: #define T0_LS (1U<<25) /* last segment of frame */
160: #define T0_OWC (1U<<21) /* status - out of win. late coll. */
161: #define T0_ECOL (1U<<20) /* status - excess collision */
162: #define T0_TBS_MASK 0xffff /* T0 segment length 15:0 */
163: /* T1 segment address 31:0 */
164: /* T2 segment address 63:32 */
165: #define R0_OWN (1U<<31) /* desc is empty */
166: #define R0_CSUM (1U<<21) /* receive checksum done */
167: #define R0_CERR (1U<<20) /* csum found negative */
168: #define R0_FL_MASK 0x07ff /* R0 frame length 10:0 */
169: /* R1 frame address 31:0 */
170: /* R2 frame address 63:32 */
171:
172: #define AVE_NTXSEGS 16
1.6 nisimura 173: #define AVE_TXQUEUELEN (AVE_NTXDESC / AVE_NTXSEGS)
1.1 nisimura 174: #define AVE_TXQUEUELEN_MASK (AVE_TXQUEUELEN - 1)
175: #define AVE_TXQUEUE_GC (AVE_TXQUEUELEN / 4)
1.6 nisimura 176: #define AVE_NTXDESC 256 /* HW limit */
1.1 nisimura 177: #define AVE_NTXDESC_MASK (AVE_NTXDESC - 1)
178: #define AVE_NEXTTX(x) (((x) + 1) & AVE_NTXDESC_MASK)
179: #define AVE_NEXTTXS(x) (((x) + 1) & AVE_TXQUEUELEN_MASK)
180:
181: #define AVE_NRXDESC 256
182: #define AVE_NRXDESC_MASK (AVE_NRXDESC - 1)
183: #define AVE_NEXTRX(x) (((x) + 1) & AVE_NRXDESC_MASK)
184:
185: #define AVE_INIT_RXDESC(sc, x) \
186: do { \
187: struct ave_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
188: struct rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \
189: struct mbuf *__m = __rxs->rxs_mbuf; \
190: bus_addr_t __paddr =__rxs->rxs_dmamap->dm_segs[0].ds_addr; \
191: __m->m_data = __m->m_ext.ext_buf; \
192: __rxd->r2 = htole32(BUS_ADDR_HI32(__paddr)); \
193: __rxd->r1 = htole32(BUS_ADDR_LO32(__paddr)); \
194: __rxd->r0 = R0_OWN | R0_FL_MASK; \
195: } while (/*CONSTCOND*/0)
1.5 nisimura 196:
197: #define AVE32_INIT_RXDESC(sc, x) \
1.1 nisimura 198: do { \
199: struct ave_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
1.5 nisimura 200: struct rdes32 *__rxd = &(sc)->sc_rxd32[(x)]; \
1.1 nisimura 201: struct mbuf *__m = __rxs->rxs_mbuf; \
202: bus_addr_t __paddr =__rxs->rxs_dmamap->dm_segs[0].ds_addr; \
203: __m->m_data = __m->m_ext.ext_buf; \
204: __rxd->r1 = htole32(__paddr); \
205: __rxd->r0 = R0_OWN | R0_FL_MASK; \
206: } while (/*CONSTCOND*/0)
207:
208: struct ave_txsoft {
209: struct mbuf *txs_mbuf; /* head of our mbuf chain */
210: bus_dmamap_t txs_dmamap; /* our DMA map */
211: int txs_firstdesc; /* first descriptor in packet */
212: int txs_lastdesc; /* last descriptor in packet */
213: int txs_ndesc; /* # of descriptors used */
214: };
215:
216: struct ave_rxsoft {
217: struct mbuf *rxs_mbuf; /* head of our mbuf chain */
218: bus_dmamap_t rxs_dmamap; /* our DMA map */
219: };
220:
221: struct ave_softc {
222: device_t sc_dev; /* generic device information */
223: bus_space_tag_t sc_st; /* bus space tag */
224: bus_space_handle_t sc_sh; /* bus space handle */
225: bus_size_t sc_mapsize; /* csr map size */
226: bus_dma_tag_t sc_dmat; /* bus DMA tag */
227: struct ethercom sc_ethercom; /* Ethernet common data */
228: struct mii_data sc_mii; /* MII */
229: callout_t sc_tick_ch; /* PHY monitor callout */
230: int sc_flowflags; /* 802.3x PAUSE flow control */
231: void *sc_ih; /* interrupt cookie */
232: int sc_phy_id; /* PHY address */
233: uint32_t sc_phymode; /* 1<<27: MII/RMII, 0: RGMII */
234: uint32_t sc_rxc; /* software copy of AVERXC */
1.4 nisimura 235: int sc_model; /* 64 paddr model or otherwise 32 */
1.1 nisimura 236:
237: bus_dmamap_t sc_cddmamap; /* control data DMA map */
238: #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
239:
1.6 nisimura 240: struct tdes *sc_txdescs; /* PTR to tdes [NTXDESC] store */
241: struct rdes *sc_rxdescs; /* PTR to rdes [NRXDESC] store */
1.5 nisimura 242: struct tdes32 *sc_txd32;
243: struct rdes32 *sc_rxd32;
1.1 nisimura 244:
245: struct ave_txsoft sc_txsoft[AVE_TXQUEUELEN];
246: struct ave_rxsoft sc_rxsoft[AVE_NRXDESC];
247: int sc_txfree; /* number of free Tx descriptors */
248: int sc_txnext; /* next ready Tx descriptor */
249: int sc_txsfree; /* number of free Tx jobs */
250: int sc_txsnext; /* next ready Tx job */
251: int sc_txsdirty; /* dirty Tx jobs */
252: int sc_rxptr; /* next ready Rx descriptor/descsoft */
253: uint32_t sc_t0csum; /* t0 field checksum designation */
254:
255: krndsource_t rnd_source; /* random source */
256: };
257:
258: static int ave_fdt_match(device_t, cfdata_t, void *);
259: static void ave_fdt_attach(device_t, device_t, void *);
260:
261: CFATTACH_DECL_NEW(ave_fdt, sizeof(struct ave_softc),
262: ave_fdt_match, ave_fdt_attach, NULL, NULL);
263:
264: static void ave_reset(struct ave_softc *);
265: static int ave_init(struct ifnet *);
266: static void ave_start(struct ifnet *);
267: static void ave_stop(struct ifnet *, int);
268: static void ave_watchdog(struct ifnet *);
269: static int ave_ioctl(struct ifnet *, u_long, void *);
270: static void ave_set_rcvfilt(struct ave_softc *);
271: static void ave_write_filt(struct ave_softc *, int, const uint8_t *);
272: static int ave_ifmedia_upd(struct ifnet *);
273: static void ave_ifmedia_sts(struct ifnet *, struct ifmediareq *);
274: static void mii_statchg(struct ifnet *);
275: static void lnkchg(struct ave_softc *);
276: static void phy_tick(void *);
277: static int mii_readreg(device_t, int, int, uint16_t *);
278: static int mii_writereg(device_t, int, int, uint16_t);
279: static int ave_intr(void *);
280: static void txreap(struct ave_softc *);
281: static void rxintr(struct ave_softc *);
282: static int add_rxbuf(struct ave_softc *, int);
283:
284: #define CSR_READ(sc, off) \
285: bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
286: #define CSR_WRITE(sc, off, val) \
287: bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
288:
1.4 nisimura 289: static const struct of_compat_data compat_data[] = {
290: { "socionext,unifier-ld20-ave4", 64 },
291: { "socionext,unifier-pro4-ave4", 32 },
292: { "socionext,unifier-pxs2-ave4", 32 },
293: { "socionext,unifier-ld11-ave4", 32 },
294: { "socionext,unifier-pxs3-ave4", 32 },
295: { NULL }
296: };
297:
1.1 nisimura 298: static int
299: ave_fdt_match(device_t parent, cfdata_t cf, void *aux)
300: {
301: struct fdt_attach_args * const faa = aux;
302:
1.4 nisimura 303: return of_match_compat_data(faa->faa_phandle, compat_data);
1.1 nisimura 304: }
305:
306: static void
307: ave_fdt_attach(device_t parent, device_t self, void *aux)
308: {
309: struct ave_softc * const sc = device_private(self);
310: struct fdt_attach_args * const faa = aux;
311: const int phandle = faa->faa_phandle;
312: struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
313: struct mii_data * const mii = &sc->sc_mii;
314: struct ifmedia * const ifm = &mii->mii_media;
315: bus_space_tag_t bst = faa->faa_bst;
316: bus_space_handle_t bsh;
317: bus_addr_t addr;
318: bus_size_t size;
319: char intrstr[128];
320: const char *phy_mode;
321: uint32_t hwimp, hwver, csr;
322: uint8_t enaddr[ETHER_ADDR_LEN];
323: int i, error = 0;
324:
1.3 nisimura 325: if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0
326: || bus_space_map(faa->faa_bst, addr, size, 0, &bsh) != 0) {
327: aprint_error(": unable to map device\n");
1.1 nisimura 328: return;
329: }
330: if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
331: aprint_error(": failed to decode interrupt\n");
332: return;
333: }
1.3 nisimura 334: sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_NET, 0,
335: ave_intr, sc);
336: if (sc->sc_ih == NULL) {
337: aprint_error_dev(self, "couldn't establish interrupt on %s\n",
338: intrstr);
339: goto fail;
340: }
1.1 nisimura 341:
342: sc->sc_dev = self;
343: sc->sc_st = bst;
344: sc->sc_sh = bsh;
345: sc->sc_mapsize = size;
346: sc->sc_dmat = faa->faa_dmat;
347:
1.3 nisimura 348: hwimp = CSR_READ(sc, AVEID);
349: hwver = CSR_READ(sc, AVEHWVER);
1.4 nisimura 350: sc->sc_model = of_search_compatible(phandle, compat_data)->data;
1.3 nisimura 351:
1.6 nisimura 352: phy_mode = fdtbus_get_string(phandle, "phy-mode");
353: if (phy_mode == NULL) {
354: aprint_error(": missing 'phy-mode' property\n");
355: phy_mode = "rgmii";
356: }
357:
1.1 nisimura 358: aprint_naive("\n");
359: aprint_normal(": Gigabit Ethernet Controller\n");
1.6 nisimura 360: aprint_normal_dev(self, "UniPhier %c%c%c%c AVE %d GbE (%d.%d) %s\n",
1.1 nisimura 361: hwimp >> 24, hwimp >> 16, hwimp >> 8, hwimp,
1.6 nisimura 362: sc->sc_model, hwver >> 8, hwver & 0xff, phy_mode);
1.1 nisimura 363: aprint_normal_dev(self, "interrupt on %s\n", intrstr);
364:
365: if (strcmp(phy_mode, "rgmii") == 0)
366: sc->sc_phymode = 0; /* RGMII */
367: else
368: sc->sc_phymode = CFG_MII; /* MII|RMII */
369:
370: CSR_WRITE(sc, AVEGR, GR_GRST | GR_PHYRST);
371: DELAY(20);
372: CSR_WRITE(sc, AVEGR, GR_GRST);
373: DELAY(40);
374: CSR_WRITE(sc, AVEGR, 0);
375: DELAY(40);
376: CSR_WRITE(sc, AVEGIMR, 0);
377:
378: /* Read the Ethernet MAC address from the EEPROM. */
379: csr = CSR_READ(sc, AVEMACL);
380: enaddr[0] = csr;
381: enaddr[1] = csr >> 8;
382: enaddr[2] = csr >> 16;
383: enaddr[3] = csr >> 24;
384: csr = CSR_READ(sc, AVEMACH);
385: enaddr[4] = csr;
386: enaddr[5] = csr >> 8;
387: aprint_normal_dev(self,
388: "Ethernet address %s\n", ether_sprintf(enaddr));
389:
390: mii->mii_ifp = ifp;
391: mii->mii_readreg = mii_readreg;
392: mii->mii_writereg = mii_writereg;
393: mii->mii_statchg = mii_statchg;
394: sc->sc_phy_id = MII_PHY_ANY;
395:
396: sc->sc_ethercom.ec_mii = mii;
397: ifmedia_init(ifm, 0, ave_ifmedia_upd, ave_ifmedia_sts);
398: mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id,
399: MII_OFFSET_ANY, MIIF_DOPAUSE);
400: if (LIST_FIRST(&mii->mii_phys) == NULL) {
401: ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
402: ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
403: } else
404: ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
405: ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */
406:
407: strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
408: ifp->if_softc = sc;
409: ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
410: ifp->if_ioctl = ave_ioctl;
411: ifp->if_start = ave_start;
412: ifp->if_watchdog = ave_watchdog;
413: ifp->if_init = ave_init;
414: ifp->if_stop = ave_stop;
415: IFQ_SET_READY(&ifp->if_snd);
416:
417: sc->sc_flowflags = 0;
418: sc->sc_rxc = 0;
419:
420: sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
421: ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
422:
423: if_attach(ifp);
424: if_deferred_start_init(ifp, NULL);
425: ether_ifattach(ifp, enaddr);
426:
427: callout_init(&sc->sc_tick_ch, 0);
428: callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);
429:
430: /*
1.6 nisimura 431: * HW has a dedicated store to hold Tx/Rx descriptor arrays.
432: * so no need to build Tx/Rx descriptor control_data.
433: * go straight to make dmamap to hold Tx segments and Rx frames.
1.1 nisimura 434: */
435: for (i = 0; i < AVE_TXQUEUELEN; i++) {
436: if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
437: AVE_NTXSEGS, MCLBYTES, 0, 0,
438: &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1.6 nisimura 439: aprint_error_dev(self,
1.1 nisimura 440: "unable to create tx DMA map %d, error = %d\n",
441: i, error);
442: goto fail_4;
443: }
444: }
445: for (i = 0; i < AVE_NRXDESC; i++) {
446: if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
447: 1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1.6 nisimura 448: aprint_error_dev(self,
1.1 nisimura 449: "unable to create rx DMA map %d, error = %d\n",
450: i, error);
451: goto fail_5;
452: }
453: sc->sc_rxsoft[i].rxs_mbuf = NULL;
454: }
455:
456: if (pmf_device_register(sc->sc_dev, NULL, NULL))
1.6 nisimura 457: pmf_class_network_register(self, ifp);
1.1 nisimura 458: else
1.6 nisimura 459: aprint_error_dev(self,
1.1 nisimura 460: "couldn't establish power handler\n");
461:
1.6 nisimura 462: rnd_attach_source(&sc->rnd_source, device_xname(self),
1.1 nisimura 463: RND_TYPE_NET, RND_FLAG_DEFAULT);
464:
465: return;
466:
467: fail_5:
468: for (i = 0; i < AVE_NRXDESC; i++) {
469: if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
470: bus_dmamap_destroy(sc->sc_dmat,
471: sc->sc_rxsoft[i].rxs_dmamap);
472: }
473: fail_4:
474: for (i = 0; i < AVE_TXQUEUELEN; i++) {
475: if (sc->sc_txsoft[i].txs_dmamap != NULL)
476: bus_dmamap_destroy(sc->sc_dmat,
477: sc->sc_txsoft[i].txs_dmamap);
478: }
479: /* no fail_3|2|1 */
480: fail:
481: fdtbus_intr_disestablish(phandle, sc->sc_ih);
482: bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_mapsize);
483: return;
484: }
485:
/*
 * Quiesce the Rx path: stop Rx, halt the Tx/Rx descriptor engines,
 * pulse the RxFIFO reset and clear latched overflow status.  The
 * caller (ave_init) restarts the engines afterwards.
 */
static void
ave_reset(struct ave_softc *sc)
{

	CSR_WRITE(sc, AVERXC, 0);	/* stop Rx first */
	CSR_WRITE(sc, AVEDESCC, 0);	/* stop Tx/Rx descriptor engine */
	CSR_WRITE(sc, AVEGR, GR_RXRST);	/* assert RxFIFO reset operation */
	DELAY(50);
	CSR_WRITE(sc, AVEGR, 0);	/* negate reset */
	CSR_WRITE(sc, AVEGISR, GISR_RXOVF); /* clear OVF condition */
}
497:
/*
 * ifnet if_init: (re)initialize the hardware and mark the interface
 * running.  Also used by ave_watchdog to recover from a Tx timeout.
 * Returns 0 (no failure paths).
 */
static int
ave_init(struct ifnet *ifp)
{
	struct ave_softc *sc = ifp->if_softc;
	extern const uint8_t etherbroadcastaddr[];
	const uint8_t promisc[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
	uint32_t csr;
	int i;

	/* mask all interrupts during (re)initialization */
	CSR_WRITE(sc, AVEGIMR, 0);

	/* cancel pending I/O */
	ave_stop(ifp, 0);

	/* make sure Rx circuit sane & stable state */
	ave_reset(sc);

	CSR_WRITE(sc, AVECFG, CFG_FLE | sc->sc_phymode);

	/* set Tx/Rx descriptor ring base addr offset and total size */
	CSR_WRITE(sc, AVETXDES, 0U|(sizeof(struct tdes)*AVE_NTXDESC) << 16);
	CSR_WRITE(sc, AVERXDES0, 0U|(sizeof(struct rdes)*AVE_NRXDESC) << 16);

	/*
	 * set ptr to Tx/Rx descriptor store.  The descriptor arrays live
	 * inside the device register window, not in host memory.
	 * NOTE(review): ring sizing above always uses the 64-bit
	 * tdes/rdes sizes even though 32-bit store pointers are also set
	 * up -- confirm the 32-bit paddr path is complete.
	 */
	sc->sc_txdescs = (void *)((uintptr_t)sc->sc_sh + AVETDB);
	sc->sc_rxdescs = (void *)((uintptr_t)sc->sc_sh + AVERDB);
	sc->sc_txd32 = (void *)((uintptr_t)sc->sc_sh + AVE32TDB);
	sc->sc_rxd32 = (void *)((uintptr_t)sc->sc_sh + AVE32RDB);

	/* build sane and loaded Tx/Rx descriptors */
	memset(sc->sc_txdescs, 0, sizeof(struct tdes)*AVE_NTXDESC);
	for (i = 0; i < AVE_NRXDESC; i++)
		(void)add_rxbuf(sc, i);

	/*
	 * address filter usage
	 * 0 - promisc.
	 * 1 - my own MAC station address
	 * 2 - broadcast address
	 */
	CSR_WRITE(sc, AVEAFEN, 0); /* clear all 17 entries first */
	ave_write_filt(sc, 0, promisc);
	ave_write_filt(sc, 1, CLLADDR(ifp->if_sadl));
	ave_write_filt(sc, 2, etherbroadcastaddr);

	/* accept multicast frame or run promisc mode */
	ave_set_rcvfilt(sc);

	/* program checksum offload; sc_t0csum is OR'ed into Tx desc t0 */
	csr = CSR_READ(sc, AVECFG);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
		sc->sc_t0csum = 0;
		csr |= (CFG_CKE | CFG_IPFCKE);
	} else
		sc->sc_t0csum = T0_NOCSUM;
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		csr |= (CFG_CKE | CFG_IPFCKE);
	CSR_WRITE(sc, AVECFG, csr);

	/* max frame length 1518, use address filter, drop PAUSE frames */
	sc->sc_rxc = 1518 | RXC_AFE | RXC_DRPEN;
	CSR_WRITE(sc, AVERXC, sc->sc_rxc | RXC_EN);

	/* activate Tx/Rx descriptor engine */
	CSR_WRITE(sc, AVEDESCC, DESCC_TD | DESCC_RD0);

	/* enable Rx ring0 timer */
	csr = CSR_READ(sc, AVEITIRQC) & 0xffff;
	CSR_WRITE(sc, AVEITIRQC, csr | ITIRQC_R0E | INTMVAL);

	CSR_WRITE(sc, AVEGIMR, /* PHY interrupt is not maskable */
	    GISR_TXCI | GISR_RXIT | GISR_RXDROP | GISR_RXOVF | GISR_RXERR);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* start one second timer */
	callout_schedule(&sc->sc_tick_ch, hz);

	return 0;
}
577:
/*
 * ifnet if_stop: stop the periodic PHY tick, take the MII down and
 * mark the interface not-running.  The MAC itself is not touched
 * here; ave_init performs a full reset on the next bring-up.
 * 'disable' is accepted for the if_stop signature but unused.
 */
static void
ave_stop(struct ifnet *ifp, int disable)
{
	struct ave_softc *sc = ifp->if_softc;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
593:
/*
 * ifmedia "set media" callback.  Forced speed/duplex selections are
 * written straight into AVETXC/AVERXC.  Always returns 0.
 */
static int
ave_ifmedia_upd(struct ifnet *ifp)
{
	struct ave_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_mii.mii_media;
	uint32_t txcr, rxcr;

	txcr = CSR_READ(sc, AVETXC);
	rxcr = CSR_READ(sc, AVERXC);
	CSR_WRITE(sc, AVERXC, rxcr &~ RXC_EN); /* stop Rx first */

	if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
		/* NOTE(review): autonegotiation branch is a stub */
		; /* restart AN */
		; /* enable AN */
		; /* advertise flow control pause */
		; /* adv. 1000FDX,100FDX,100HDX,10FDX,10HDX */
	} else {
		txcr &= ~(TXC_SPD1000 | TXC_SPD100);
		rxcr &= ~RXC_USEFDX;
		if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_1000_T)
			txcr |= TXC_SPD1000;
		else if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
			txcr |= TXC_SPD100;
		if (ifm->ifm_media & IFM_FDX)
			rxcr |= RXC_USEFDX;
	}
	sc->sc_rxc = rxcr;
	CSR_WRITE(sc, AVETXC, txcr);
	CSR_WRITE(sc, AVERXC, rxcr | RXC_EN);
	return 0;
}
625:
626: static void
627: ave_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
628: {
629: struct ave_softc *sc = ifp->if_softc;
630: struct mii_data *mii = &sc->sc_mii;
631:
632: mii_pollstat(mii);
633: ifmr->ifm_status = mii->mii_media_status;
634: ifmr->ifm_active = sc->sc_flowflags |
635: (mii->mii_media_active & ~IFM_ETH_FMASK);
636: }
637:
638: void
639: mii_statchg(struct ifnet *ifp)
640: {
641: struct ave_softc *sc = ifp->if_softc;
642: struct mii_data *mii = &sc->sc_mii;
1.7 ! nisimura 643: struct ifmedia * const ifm = &mii->mii_media;
! 644: uint32_t txcr, rxcr, csr;
1.1 nisimura 645:
1.7 ! nisimura 646: /* get flow control negotiation result */
1.1 nisimura 647: if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
648: (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
649: sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
650:
651: txcr = CSR_READ(sc, AVETXC);
652: rxcr = CSR_READ(sc, AVERXC);
653: CSR_WRITE(sc, AVERXC, rxcr &~ RXC_EN); /* stop Rx first */
1.7 ! nisimura 654:
! 655: /* adjust 802.3x PAUSE flow control */
1.1 nisimura 656: if ((mii->mii_media_active & IFM_FDX)
657: && (sc->sc_flowflags & IFM_ETH_TXPAUSE))
658: txcr |= TXC_FCE;
659: else
660: txcr &= ~TXC_FCE;
661: if ((mii->mii_media_active & IFM_FDX)
662: && (sc->sc_flowflags & IFM_ETH_RXPAUSE))
663: rxcr |= RXC_FCE;
664: else
665: rxcr &= ~RXC_FCE;
1.7 ! nisimura 666:
! 667: /* HW does not handle auto speed adjustment */
! 668: txcr &= ~(TXC_SPD1000 | TXC_SPD100);
! 669: if ((sc->sc_phymode & CFG_MII) == 0 /* RGMII model */
! 670: && IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
! 671: txcr |= TXC_SPD1000;
! 672: else if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
! 673: txcr |= TXC_SPD100;
! 674:
! 675: /* adjust LINKSEL when MII/RMII too */
! 676: if (sc->sc_phymode & CFG_MII) {
! 677: csr = CSR_READ(sc, AVELINKSEL);
! 678: if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
! 679: csr |= LINKSEL_SPD100;
! 680: else
! 681: csr &= ~LINKSEL_SPD100;
! 682: CSR_WRITE(sc, AVELINKSEL, csr);
! 683: }
! 684:
1.1 nisimura 685: sc->sc_rxc = rxcr;
686: CSR_WRITE(sc, AVETXC, txcr);
1.7 ! nisimura 687: CSR_WRITE(sc, AVERXC, rxcr | RXC_EN);
1.1 nisimura 688:
689: printf("%ctxfe, %crxfe\n",
690: (txcr & TXC_FCE) ? '+' : '-', (rxcr & RXC_FCE) ? '+' : '-');
691: }
692:
/*
 * PHY link-change handler (called from ave_intr on GISR_PHY).
 * The ifmediareq result is discarded; this is called purely for
 * the side effect of ave_ifmedia_sts -> mii_pollstat refreshing
 * the MII state (which in turn invokes mii_statchg).
 */
static void
lnkchg(struct ave_softc *sc)
{
	struct ifmediareq ifmr;

	ave_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
}
700:
/*
 * Once-a-second callout: drive the MII state machine at splnet,
 * then reschedule ourselves.  Armed by ave_init, stopped by ave_stop.
 */
static void
phy_tick(void *arg)
{
	struct ave_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}
714:
715: static int
716: mii_readreg(device_t self, int phy, int reg, uint16_t *val)
717: {
718: struct ave_softc *sc = device_private(self);
719: uint32_t ctrl, stat;
720:
721: CSR_WRITE(sc, AVEMDADR, reg | (sc->sc_phy_id << 8));
722: ctrl = CSR_READ(sc, AVEMDIOC) & ~MDIOC_WR;
723: CSR_WRITE(sc, AVEMDIOC, ctrl | MDIOC_RD);
724: stat = CSR_READ(sc, AVEMDIOS);
725: while (stat & MDIOS_BUSY) {
726: DELAY(10);
727: stat = CSR_READ(sc, AVEMDIOS);
728: }
729: *val = CSR_READ(sc, AVEMDRDD);
730: return 0;
731: }
732:
733: static int
734: mii_writereg(device_t self, int phy, int reg, uint16_t val)
735: {
736: struct ave_softc *sc = device_private(self);
737: uint32_t ctrl, stat;
738:
739: CSR_WRITE(sc, AVEMDADR, reg | (sc->sc_phy_id << 8));
740: CSR_WRITE(sc, AVEMDWRD, val);
741: ctrl = CSR_READ(sc, AVEMDIOC) & ~MDIOC_RD;
742: CSR_WRITE(sc, AVEMDIOC, ctrl | MDIOC_WR);
743: stat = CSR_READ(sc, AVEMDIOS);
744: while (stat & MDIOS_BUSY) {
745: DELAY(10);
746: stat = CSR_READ(sc, AVEMDIOS);
747: }
748: return 0;
749: }
750:
/*
 * ifnet ioctl handler.  SIOCSIFMEDIA is intercepted to keep the
 * 802.3x flow-control flags in sc_flowflags coherent with the
 * requested media; everything else is delegated to ether_ioctl(),
 * with ENETRESET turned into a receive-filter reload (or a full
 * reinit for SIOCSIFCAP, since checksum offload lives in AVECFG).
 */
static int
ave_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ave_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifmedia *ifm;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		ifm = &sc->sc_mii.mii_media;
		error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ave_set_rcvfilt(sc);
		}
		break;
	}

	splx(s);
	return error;
}
801:
/*
 * Program address-filter entry 'i' to match MAC address 'en' and
 * enable it.  Multicast entries appear to compare only the leading
 * prefix bytes (3 for 01:xx v4, 2 for 33:33 v6) via genmask0();
 * entry 0 (promisc) masks every byte off so anything matches --
 * confirm mask polarity against the AVE filter spec.
 */
static void
ave_write_filt(struct ave_softc *sc, int i, const uint8_t *en)
{
	uint32_t macl, mach, n, mskbyte0;

	macl = mach = 0;
	macl |= (en[3]<<24) | (en[2]<<16)| (en[1]<<8) | en[0];
	mach |= (en[5]<<8) | en[4];
	/* pick v4mcast or v6mcast length */
	n = (en[0] == 0x01) ? 3 : (en[0] == 0x33) ? 2 : ETHER_ADDR_LEN;
	/* entry 0 is reserved for promisc mode */
	mskbyte0 = (i > 0) ? genmask0(n) : MSKBYTE0;

	/* set frame address first */
	CSR_WRITE(sc, AVEAFB + (i * 0x40) + 0, macl);
	CSR_WRITE(sc, AVEAFB + (i * 0x40) + 4, mach);
	/* set byte mask according to mask length, any of 6, 3, or 2 */
	CSR_WRITE(sc, AVEAFMSKB + (i * 8) + 0, mskbyte0);
	CSR_WRITE(sc, AVEAFMSKB + (i * 8) + 4, MSKBYTE1);
	/* set bit vector mask */
	CSR_WRITE(sc, AVEAFMSKV + (i * 4), 0xffff);
	/* use Rx ring 0 anyway */
	CSR_WRITE(sc, AVEAFRING + (i * 4), 0);
	/* filter entry enable bit vector */
	CSR_WRITE(sc, AVEAFEN, CSR_READ(sc, AVEAFEN) | 1U << i);
}
828:
/*
 * Rebuild the hardware receive filter.  Slot usage: 0 promisc,
 * 1 station address, 2 broadcast (programmed by ave_init); slots
 * 11..17 hold up to 7 exact multicast addresses.  Promisc clears
 * RXC_AFE entirely; ALLMULTI programs IPv4/IPv6 multicast prefix
 * matches into slots 11/12 instead of exact entries.
 */
static void
ave_set_rcvfilt(struct ave_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	extern const uint8_t ether_ipmulticast_min[];
	extern const uint8_t ether_ip6multicast_min[];
	uint32_t csr;
	int i;

	sc->sc_rxc &= (RXC_AFE | RXC_EN);
	CSR_WRITE(sc, AVERXC, sc->sc_rxc); /* stop Rx first */

	/* turn off all 7 mcast filter entries */
	csr = CSR_READ(sc, AVEAFEN);
	CSR_WRITE(sc, AVEAFEN, csr & ~(0177U << 11));

	ETHER_LOCK(ec);
	if ((ifp->if_flags & IFF_PROMISC) || ec->ec_multicnt > 7) {
		ec->ec_flags |= ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		goto update;
	}
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	i = 11; /* slot 11:17 to catch multicast frames */
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto update;
		}
		KASSERT(i < 17);
		/* NOTE(review): debug printf left in; remove before release */
		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
		/* use additional MAC addr to accept up to 7 */
		ave_write_filt(sc, i, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}
	ETHER_UNLOCK(ec);
	sc->sc_rxc |= RXC_AFE;

 update:
	if (ifp->if_flags & IFF_PROMISC)
		/* RXC_AFE has been cleared, nothing to do */;
	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		/* slot 11/12 for IPv4/v6 multicast */
		ave_write_filt(sc, 11, ether_ipmulticast_min);
		ave_write_filt(sc, 12, ether_ip6multicast_min); /* INET6 */
		/* clear slot 13-17 */
		csr = CSR_READ(sc, AVEAFEN);
		CSR_WRITE(sc, AVEAFEN, csr & ~(037U << 13));
		sc->sc_rxc |= RXC_AFE;
	}
	CSR_WRITE(sc, AVERXC, sc->sc_rxc | RXC_EN);
}
895:
896: static void
897: ave_watchdog(struct ifnet *ifp)
898: {
899: struct ave_softc *sc = ifp->if_softc;
900:
901: /*
902: * Since we're not interrupting every packet, sweep
903: * up before we report an error.
904: */
905: txreap(sc);
906:
907: if (sc->sc_txfree != AVE_NTXDESC) {
908: aprint_error_dev(sc->sc_dev,
909: "device timeout (txfree %d txsfree %d txnext %d)\n",
910: sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
911: if_statinc(ifp, if_oerrors);
912:
913: /* Reset the interface. */
914: ave_init(ifp);
915: }
916:
917: ave_start(ifp);
918: }
919:
/*
 * if_start handler: drain the interface send queue into the Tx
 * descriptor ring.
 *
 * Each packet is DMA-loaded and written out one descriptor per
 * segment.  T0_OWN for the FIRST segment is deliberately deferred and
 * written last, so the hardware never observes a partially built
 * chain.  Sets IFF_OACTIVE when descriptors or soft-state slots run
 * out, and arms the watchdog whenever new work was queued.
 */
static void
ave_start(struct ifnet *ifp)
{
	struct ave_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ave_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Remember the previous number of free descriptors. */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Peek first; the packet is only dequeued once committed. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* Running low on soft-state slots: try to reclaim some. */
		if (sc->sc_txsfree < AVE_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				/* Too fragmented for the map: drop it. */
				aprint_error_dev(sc->sc_dev,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are not more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
		lasttx = -1;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AVE_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			bus_addr_t paddr = dmamap->dm_segs[seg].ds_addr;
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t2 = htole32(BUS_ADDR_HI32(paddr));
			tdes->t1 = htole32(BUS_ADDR_LO32(paddr));
			tdes->t0 = tdes0 | sc->sc_t0csum
			     | (dmamap->dm_segs[seg].ds_len & T0_TBS_MASK);
			tdes0 = T0_OWN; /* 2nd and other segments */
			lasttx = nexttx;
		}
		/*
		 * Outgoing NFS mbuf must be unloaded when Tx completed.
		 * Without T0_IOC NFS mbuf is left unack'ed for excessive
		 * time and NFS stops to proceed until ave_watchdog()
		 * calls txreap() to reclaim the unack'ed mbuf.
		 * It's painful to traverse every mbuf chain to determine
		 * whether someone is waiting for Tx completion.
		 */
		m = m0;
		do {
			/* external storage implies someone may await reclaim */
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t0 |= T0_IOC;
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* Write deferred 1st segment T0_OWN at the final stage */
		sc->sc_txdescs[lasttx].t0 |= T0_LS;
		sc->sc_txdescs[sc->sc_txnext].t0 |= (T0_FS | T0_OWN);
		/*
		 * NOTE(review): the descriptor sync and the DESCC "kick"
		 * below are commented out — presumably the descriptors
		 * live in coherent space and the DMA engine polls them;
		 * confirm against the attach/init code.
		 */
		/* AVE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); */

		/* Tell DMA start transmit */
		/* CSR_WRITE(sc, AVEDESCC, DESCC_TD | DESCC_RD0); */

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = AVE_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1060:
1061: static int
1062: ave_intr(void *arg)
1063: {
1064: struct ave_softc *sc = arg;
1065: uint32_t gimr, stat;
1066:
1067: gimr = CSR_READ(sc, AVEGIMR);
1068: CSR_WRITE(sc, AVEGIMR, 0);
1069: stat = CSR_READ(sc, AVEGISR);
1070: if (stat == 0)
1071: goto done;
1072: if (stat & GISR_PHY) {
1073: lnkchg(sc);
1074: CSR_WRITE(sc, AVEGISR, GISR_PHY);
1075: }
1076: stat &= CSR_READ(sc, AVEGIMR);
1077: if (stat == 0)
1078: goto done;
1079: if (stat & GISR_RXDROP)
1080: CSR_WRITE(sc, AVEGISR, GISR_RXDROP);
1081: if (stat & GISR_RXOVF)
1082: CSR_WRITE(sc, AVEGISR, GISR_RXOVF);
1083: if (stat & GISR_RXERR)
1084: CSR_WRITE(sc, AVEGISR, GISR_RXERR);
1085: if (stat & GISR_RXIT) {
1086: rxintr(sc);
1087: CSR_WRITE(sc, AVEGISR, GISR_RXIT);
1088: }
1089: if (stat & GISR_TXCI) {
1090: txreap(sc);
1091: CSR_WRITE(sc, AVEGISR, GISR_TXCI);
1092: }
1093: done:
1094: CSR_WRITE(sc, AVEGIMR, gimr);
1095: return (stat != 0);
1096: }
1097:
1098: static void
1099: txreap(struct ave_softc *sc)
1100: {
1101: struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1102: struct ave_txsoft *txs;
1103: uint32_t txstat;
1104: int i;
1105:
1106: ifp->if_flags &= ~IFF_OACTIVE;
1107:
1108: for (i = sc->sc_txsdirty; sc->sc_txsfree != AVE_TXQUEUELEN;
1109: i = AVE_NEXTTXS(i), sc->sc_txsfree++) {
1110: txs = &sc->sc_txsoft[i];
1111:
1112: /* AVE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
1113: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); */
1114:
1115: txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
1116:
1117: if (txstat & T0_OWN) /* desc is still in use */
1118: break;
1119: /*
1120: * XXX able to count statistics XXX
1121: * T0_DONEOK -- completed ok
1122: * T0_OWC -- out of window or collision
1123: * T0_ECOL -- dropped by excess collision
1124: */
1125: if_statinc(ifp, if_opackets);
1126:
1127: sc->sc_txfree += txs->txs_ndesc;
1128: bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1129: 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1130: bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1131: m_freem(txs->txs_mbuf);
1132: txs->txs_mbuf = NULL;
1133: }
1134: sc->sc_txsdirty = i;
1135: if (sc->sc_txsfree == AVE_TXQUEUELEN)
1136: ifp->if_timer = 0;
1137: }
1138:
1139: static void
1140: rxintr(struct ave_softc *sc)
1141: {
1142: struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1143: struct ave_rxsoft *rxs;
1144: struct mbuf *m;
1145: uint32_t rxstat;
1146: int i, len;
1147:
1148: for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = AVE_NEXTRX(i)) {
1149: rxs = &sc->sc_rxsoft[i];
1150:
1151: /* AVE_CDRXSYNC(sc, i,
1152: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); */
1153:
1154: rxstat = sc->sc_rxdescs[i].r0;
1155: if (rxstat & R0_OWN) /* desc is left empty */
1156: break;
1157:
1158: /* R0_FS | R0_LS must have been marked for this desc */
1159:
1160: bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1161: rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1162:
1163: len = rxstat & R0_FL_MASK;
1164: len -= ETHER_CRC_LEN; /* Trim CRC off */
1165: m = rxs->rxs_mbuf;
1166:
1167: if (add_rxbuf(sc, i) != 0) {
1168: if_statinc(ifp, if_ierrors);
1169: AVE_INIT_RXDESC(sc, i);
1170: bus_dmamap_sync(sc->sc_dmat,
1171: rxs->rxs_dmamap, 0,
1172: rxs->rxs_dmamap->dm_mapsize,
1173: BUS_DMASYNC_PREREAD);
1174: continue;
1175: }
1176:
1177: m_set_rcvif(m, ifp);
1178: m->m_pkthdr.len = m->m_len = len;
1179:
1180: if (rxstat & R0_CSUM) {
1181: uint32_t csum = M_CSUM_IPv4;
1182: if (rxstat & R0_CERR)
1183: csum |= M_CSUM_IPv4_BAD;
1184: m->m_pkthdr.csum_flags |= csum;
1185: }
1186: if_percpuq_enqueue(ifp->if_percpuq, m);
1187: }
1188: sc->sc_rxptr = i;
1189: }
1190:
1191: static int
1192: add_rxbuf(struct ave_softc *sc, int i)
1193: {
1194: struct ave_rxsoft *rxs = &sc->sc_rxsoft[i];
1195: struct mbuf *m;
1196: int error;
1197:
1198: MGETHDR(m, M_DONTWAIT, MT_DATA);
1199: if (m == NULL)
1200: return ENOBUFS;
1201:
1202: MCLGET(m, M_DONTWAIT);
1203: if ((m->m_flags & M_EXT) == 0) {
1204: m_freem(m);
1205: return ENOBUFS;
1206: }
1207:
1208: if (rxs->rxs_mbuf != NULL)
1209: bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1210:
1211: rxs->rxs_mbuf = m;
1212:
1213: error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1214: m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1215: if (error) {
1216: aprint_error_dev(sc->sc_dev,
1217: "can't load rx DMA map %d, error = %d\n", i, error);
1218: panic("add_rxbuf");
1219: }
1220:
1221: bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1222: rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1223: AVE_INIT_RXDESC(sc, i);
1224:
1225: return 0;
1226: }
CVSweb <webmaster@jp.NetBSD.org>