Annotation of src/sys/arch/arm/sociox/if_ave.c, Revision 1.11
1.11 ! nisimura 1: /* $NetBSD: if_ave.c,v 1.10 2020/03/22 00:14:16 nisimura Exp $ */
1.1 nisimura 2:
3: /*-
4: * Copyright (c) 2020 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Tohru Nishimura.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
1.6 nisimura 33: * Socionext UniPhier AVE GbE driver
34: *
35: * There are two groups for 64bit paddr model and 32bit paddr.
1.1 nisimura 36: */
37:
38: #include <sys/cdefs.h>
1.11 ! nisimura 39: __KERNEL_RCSID(0, "$NetBSD: if_ave.c,v 1.10 2020/03/22 00:14:16 nisimura Exp $");
1.1 nisimura 40:
41: #include <sys/param.h>
42: #include <sys/bus.h>
43: #include <sys/intr.h>
44: #include <sys/device.h>
45: #include <sys/callout.h>
46: #include <sys/mbuf.h>
47: #include <sys/malloc.h>
48: #include <sys/errno.h>
49: #include <sys/rndsource.h>
50: #include <sys/kernel.h>
51: #include <sys/systm.h>
52:
53: #include <net/if.h>
54: #include <net/if_media.h>
55: #include <net/if_dl.h>
56: #include <net/if_ether.h>
57: #include <dev/mii/mii.h>
58: #include <dev/mii/miivar.h>
59: #include <net/bpf.h>
60:
61: #include <dev/fdt/fdtvar.h>
62:
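/* flags value passed to fdtbus_intr_establish(); this handler is not marked MP-safe */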
1.11 ! nisimura 63: #define NOT_MP_SAFE (0)
1.1 nisimura 64:
65: #define AVEID 0x000 /* hardware ID */
66: #define AVEHWVER 0x004 /* hardware version */
67: #define AVEGR 0x008 /* chip global control */
68: #define GR_RXRST (1U<<5) /* RxFIFO reset */
69: #define GR_PHYRST (1U<<4) /* external PHY reset */
70: #define GR_GRST (1U<<0) /* full chip reset */
71: #define AVECFG 0x00c /* hw configuration */
72: #define CFG_FLE (1U<<31) /* filter function enable */
73: #define CFG_CKE (1U<<30) /* checksum enable */
1.11 ! nisimura 74: #define CFG_MII (1U<<27) /* 1: RMII/MII, 0: RGMII */
1.1  nisimura   75: #define  CFG_IPFCKE	(1U<<24)	/* IP fragment csum enable */
76: #define AVEGIMR 0x100 /* global interrupt mask */
77: #define AVEGISR 0x104 /* global interrupt status */
78: #define GISR_PHY (1U<<24) /* PHY status change detected */
79: #define GISR_TXCI (1U<<16) /* transmission completed */
1.8 nisimura 80: #define GISR_RXF2L (1U<<8) /* Rx frame length beyond limit */
                81: #define  GISR_RXOVF	(1U<<7)		/* RxFIFO overflow detected */
82: #define GISR_RXDROP (1U<<6) /* PAUSE frame has been dropped */
1.1 nisimura 83: #define GISR_RXIT (1U<<5) /* receive itimer notify */
84: #define AVETXC 0x200 /* transmit control */
1.10 nisimura 85: #define TXC_FCE (1U<<18) /* generate PAUSE to moderate Rx lvl */
1.1 nisimura 86: #define TXC_SPD1000 (1U<<17) /* use 1000Mbps */
87: #define TXC_SPD100 (1U<<16) /* use 100Mbps */
88: #define AVERXC 0x204 /* receive control */
89: #define RXC_EN (1U<<30) /* enable receive circuit */
90: #define RXC_USEFDX (1U<<22) /* use full-duplex */
1.10 nisimura 91: #define RXC_FCE (1U<<21) /* accept PAUSE to throttle Tx */
1.1 nisimura 92: #define RXC_AFE (1U<<19) /* use address filter (!promisc) */
93: #define RXC_DRPEN (1U<<18) /* drop receiving PAUSE frames */
94: /* RXC 15:0 max frame length to accept */
95: #define AVEMACL 0x208 /* MAC address lower */
96: #define AVEMACH 0x20c /* MAC address upper */
97: #define AVEMDIOC 0x214 /* MDIO control */
98: #define MDIOC_RD (1U<<3) /* read op */
99: #define MDIOC_WR (1U<<2) /* write op */
100: #define AVEMDADR 0x218 /* MDIO address -- 13:8 phy id */
101: #define AVEMDWRD 0x21c /* MDIO write data - 15:0 */
102: #define AVEMDIOS 0x220 /* MDIO status */
103: #define MDIOS_BUSY (1U<<0) /* MDIO in progress */
104: #define AVEMDRDD 0x224 /* MDIO read data */
105: #define AVEDESCC 0x300 /* descriptor control */
106: #define DESCC_RD0 (1U<<3) /* activate Rx0 descriptor to run */
107: #define DESCC_RSTP (1U<<2) /* pause Rx descriptor */
108: #define DESCC_TD (1U<<0) /* activate Tx descriptor to run */
109: /* 31:16 status report to read */
110: #define AVETXDES 0x304 /* Tx descriptor control */
111: /* 27:16 Tx descriptor byte count
112: * 11:0 start address offset */
113: #define AVERXDES0 0x308 /* Rx0 descriptor control */
114: /* 30:16 Rx descriptor byte count
115: * 14:0 start address offset */
116: #define AVEITIRQC 0x34c /* interval IRQ control */
117: #define ITIRQC_R0E (1U<<27) /* enable Rx0 interval timer */
118: #define INTMVAL (20<<16) /* INTM value */
119: /* 15:0 interval timer count */
120:
121: #define AVEAFB 0x0800 /* address filter base */
122: #define AVEAFMSKB 0x0d00 /* byte mask base */
               123: #define  MSKBYTE0	0xffffff3f	/* zeros in 7:6 */
               124: #define  MSKBYTE1	0x03ffffff	/* ones in 25:0 */
1.7 nisimura 125: #define genmask0(x) (MSKBYTE0 & (~0U << (x)))
1.1 nisimura 126: #define AVEAFMSKV 0x0e00 /* bit mask base */
127: #define AVEAFRING 0x0f00 /* entry ring number selector */
128: #define AVEAFEN 0x0ffc /* entry enable bit vector */
129:
1.6  nisimura  130: #define  AVETDB		0x1000	/* 64bit Tx descriptor store, up to 256 */
               131: #define  AVERDB		0x1c00	/* 64bit Rx descriptor store, up to 2048 */
               132: #define  AVE32TDB	0x1000	/* 32bit Tx store base, up to 256 */
               133: #define  AVE32RDB	0x1800	/* 32bit Rx store base, up to 2048 */
1.1 nisimura 134:
1.7 nisimura 135: #define AVERMIIC 0x8028 /* RMII control */
1.8 nisimura 136: #define RMIIC_RST (1U<<16) /* reset operation */
137: #define AVELINKSEL 0x8034 /* RMII speed selection */
138: #define LINKSEL_SPD100 (1U<<0) /* use 100Mbps */
1.7 nisimura 139:
1.1 nisimura 140: /*
1.5  nisimura  141:  * descriptor size is 12 bytes for the 64-bit paddr design, 8 bytes otherwise.
1.1 nisimura 142: */
143: struct tdes {
1.4 nisimura 144: uint32_t t0, t1, t2;
1.1 nisimura 145: };
146:
147: struct rdes {
1.4 nisimura 148: uint32_t r0, r1, r2;
1.1 nisimura 149: };
150:
1.5 nisimura 151: struct tdes32 { uint32_t t0, t1; };
152: struct rdes32 { uint32_t r0, r1; };
153:
1.1 nisimura 154: #define T0_OWN (1U<<31) /* desc is ready to Tx */
155: #define T0_IOC (1U<<29) /* post interrupt on Tx completes */
156: #define T0_NOCSUM (1U<<28) /* inhibit checksum operation */
157: #define T0_DONEOK (1U<<27) /* status - Tx completed ok */
158: #define T0_FS (1U<<26) /* first segment of frame */
159: #define T0_LS (1U<<25) /* last segment of frame */
160: #define T0_OWC (1U<<21) /* status - out of win. late coll. */
161: #define T0_ECOL (1U<<20) /* status - excess collision */
162: #define T0_TBS_MASK 0xffff /* T0 segment length 15:0 */
163: /* T1 segment address 31:0 */
164: /* T2 segment address 63:32 */
165: #define R0_OWN (1U<<31) /* desc is empty */
166: #define R0_CSUM (1U<<21) /* receive checksum done */
167: #define R0_CERR (1U<<20) /* csum found negative */
168: #define R0_FL_MASK 0x07ff /* R0 frame length 10:0 */
169: /* R1 frame address 31:0 */
170: /* R2 frame address 63:32 */
171:
172: #define AVE_NTXSEGS 16
1.6 nisimura 173: #define AVE_TXQUEUELEN (AVE_NTXDESC / AVE_NTXSEGS)
1.1 nisimura 174: #define AVE_TXQUEUELEN_MASK (AVE_TXQUEUELEN - 1)
175: #define AVE_TXQUEUE_GC (AVE_TXQUEUELEN / 4)
1.6 nisimura 176: #define AVE_NTXDESC 256 /* HW limit */
1.1 nisimura 177: #define AVE_NTXDESC_MASK (AVE_NTXDESC - 1)
178: #define AVE_NEXTTX(x) (((x) + 1) & AVE_NTXDESC_MASK)
179: #define AVE_NEXTTXS(x) (((x) + 1) & AVE_TXQUEUELEN_MASK)
180:
181: #define AVE_NRXDESC 256
182: #define AVE_NRXDESC_MASK (AVE_NRXDESC - 1)
183: #define AVE_NEXTRX(x) (((x) + 1) & AVE_NRXDESC_MASK)
184:
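/*
 * Reinitialize Rx descriptor 'x': rewrite the frame address from the
 * already-loaded DMA map and hand the descriptor back to the hardware
 * by setting R0_OWN together with the maximum acceptable frame length.
 */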
185: #define AVE_INIT_RXDESC(sc, x) \
186: do { \
187: struct ave_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
188: struct rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \
189: struct mbuf *__m = __rxs->rxs_mbuf; \
190: bus_addr_t __paddr =__rxs->rxs_dmamap->dm_segs[0].ds_addr; \
191: __m->m_data = __m->m_ext.ext_buf; \
192: __rxd->r2 = htole32(BUS_ADDR_HI32(__paddr)); \
193: __rxd->r1 = htole32(BUS_ADDR_LO32(__paddr)); \
194: __rxd->r0 = R0_OWN | R0_FL_MASK; \
195: } while (/*CONSTCOND*/0)
1.5 nisimura 196:
197: #define AVE32_INIT_RXDESC(sc, x) \
1.1 nisimura 198: do { \
199: struct ave_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
1.5 nisimura 200: struct rdes32 *__rxd = &(sc)->sc_rxd32[(x)]; \
1.1 nisimura 201: struct mbuf *__m = __rxs->rxs_mbuf; \
202: bus_addr_t __paddr =__rxs->rxs_dmamap->dm_segs[0].ds_addr; \
203: __m->m_data = __m->m_ext.ext_buf; \
204: __rxd->r1 = htole32(__paddr); \
205: __rxd->r0 = R0_OWN | R0_FL_MASK; \
206: } while (/*CONSTCOND*/0)
207:
208: struct ave_txsoft {
209: struct mbuf *txs_mbuf; /* head of our mbuf chain */
210: bus_dmamap_t txs_dmamap; /* our DMA map */
211: int txs_firstdesc; /* first descriptor in packet */
212: int txs_lastdesc; /* last descriptor in packet */
213: int txs_ndesc; /* # of descriptors used */
214: };
215:
216: struct ave_rxsoft {
217: struct mbuf *rxs_mbuf; /* head of our mbuf chain */
218: bus_dmamap_t rxs_dmamap; /* our DMA map */
219: };
220:
221: struct ave_softc {
222: device_t sc_dev; /* generic device information */
223: bus_space_tag_t sc_st; /* bus space tag */
224: bus_space_handle_t sc_sh; /* bus space handle */
225: bus_size_t sc_mapsize; /* csr map size */
226: bus_dma_tag_t sc_dmat; /* bus DMA tag */
227: struct ethercom sc_ethercom; /* Ethernet common data */
228: struct mii_data sc_mii; /* MII */
229: callout_t sc_tick_ch; /* PHY monitor callout */
230: int sc_flowflags; /* 802.3x PAUSE flow control */
231: void *sc_ih; /* interrupt cookie */
232: int sc_phy_id; /* PHY address */
233: uint32_t sc_phymode; /* 1<<27: MII/RMII, 0: RGMII */
234: uint32_t sc_rxc; /* software copy of AVERXC */
1.4  nisimura  235: 	int sc_model;			/* 64-bit paddr model, otherwise 32-bit */
1.1 nisimura 236:
237: bus_dmamap_t sc_cddmamap; /* control data DMA map */
238: #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
239:
1.6 nisimura 240: struct tdes *sc_txdescs; /* PTR to tdes [NTXDESC] store */
241: struct rdes *sc_rxdescs; /* PTR to rdes [NRXDESC] store */
1.5 nisimura 242: struct tdes32 *sc_txd32;
243: struct rdes32 *sc_rxd32;
1.1 nisimura 244:
245: struct ave_txsoft sc_txsoft[AVE_TXQUEUELEN];
246: struct ave_rxsoft sc_rxsoft[AVE_NRXDESC];
247: int sc_txfree; /* number of free Tx descriptors */
248: int sc_txnext; /* next ready Tx descriptor */
249: int sc_txsfree; /* number of free Tx jobs */
250: int sc_txsnext; /* next ready Tx job */
251: int sc_txsdirty; /* dirty Tx jobs */
252: int sc_rxptr; /* next ready Rx descriptor/descsoft */
253: uint32_t sc_t0csum; /* t0 field checksum designation */
254:
255: krndsource_t rnd_source; /* random source */
256: };
257:
258: static int ave_fdt_match(device_t, cfdata_t, void *);
259: static void ave_fdt_attach(device_t, device_t, void *);
260:
261: CFATTACH_DECL_NEW(ave_fdt, sizeof(struct ave_softc),
262: ave_fdt_match, ave_fdt_attach, NULL, NULL);
263:
264: static void ave_reset(struct ave_softc *);
265: static int ave_init(struct ifnet *);
266: static void ave_start(struct ifnet *);
267: static void ave_stop(struct ifnet *, int);
268: static void ave_watchdog(struct ifnet *);
269: static int ave_ioctl(struct ifnet *, u_long, void *);
270: static void ave_set_rcvfilt(struct ave_softc *);
271: static void ave_write_filt(struct ave_softc *, int, const uint8_t *);
272: static int ave_ifmedia_upd(struct ifnet *);
273: static void ave_ifmedia_sts(struct ifnet *, struct ifmediareq *);
274: static void mii_statchg(struct ifnet *);
275: static void lnkchg(struct ave_softc *);
276: static void phy_tick(void *);
277: static int mii_readreg(device_t, int, int, uint16_t *);
278: static int mii_writereg(device_t, int, int, uint16_t);
279: static int ave_intr(void *);
280: static void txreap(struct ave_softc *);
281: static void rxintr(struct ave_softc *);
282: static int add_rxbuf(struct ave_softc *, int);
283:
284: #define CSR_READ(sc, off) \
285: bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
286: #define CSR_WRITE(sc, off, val) \
287: bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
288:
1.4 nisimura 289: static const struct of_compat_data compat_data[] = {
1.10 nisimura 290: { "socionext,unifier-ld20-ave4", 64 }, /* XXX only this for now */
1.4 nisimura 291: { "socionext,unifier-pro4-ave4", 32 },
292: { "socionext,unifier-pxs2-ave4", 32 },
293: { "socionext,unifier-ld11-ave4", 32 },
294: { "socionext,unifier-pxs3-ave4", 32 },
295: { NULL }
296: };
297:
1.1 nisimura 298: static int
299: ave_fdt_match(device_t parent, cfdata_t cf, void *aux)
300: {
301: struct fdt_attach_args * const faa = aux;
302:
1.4 nisimura 303: return of_match_compat_data(faa->faa_phandle, compat_data);
1.1 nisimura 304: }
305:
306: static void
307: ave_fdt_attach(device_t parent, device_t self, void *aux)
308: {
309: struct ave_softc * const sc = device_private(self);
310: struct fdt_attach_args * const faa = aux;
311: const int phandle = faa->faa_phandle;
312: struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
313: struct mii_data * const mii = &sc->sc_mii;
314: struct ifmedia * const ifm = &mii->mii_media;
315: bus_space_tag_t bst = faa->faa_bst;
316: bus_space_handle_t bsh;
317: bus_addr_t addr;
318: bus_size_t size;
319: char intrstr[128];
320: const char *phy_mode;
321: uint32_t hwimp, hwver, csr;
322: uint8_t enaddr[ETHER_ADDR_LEN];
323: int i, error = 0;
324:
1.3 nisimura 325: if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0
326: || bus_space_map(faa->faa_bst, addr, size, 0, &bsh) != 0) {
327: aprint_error(": unable to map device\n");
1.1 nisimura 328: return;
329: }
330: if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
331: aprint_error(": failed to decode interrupt\n");
332: return;
333: }
1.11 ! nisimura 334: sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_NET, NOT_MP_SAFE,
1.3 nisimura 335: ave_intr, sc);
336: if (sc->sc_ih == NULL) {
337: aprint_error_dev(self, "couldn't establish interrupt on %s\n",
338: intrstr);
339: goto fail;
340: }
1.1 nisimura 341:
342: sc->sc_dev = self;
343: sc->sc_st = bst;
344: sc->sc_sh = bsh;
345: sc->sc_mapsize = size;
346: sc->sc_dmat = faa->faa_dmat;
347:
1.3 nisimura 348: hwimp = CSR_READ(sc, AVEID);
349: hwver = CSR_READ(sc, AVEHWVER);
1.4 nisimura 350: sc->sc_model = of_search_compatible(phandle, compat_data)->data;
1.3 nisimura 351:
1.6 nisimura 352: phy_mode = fdtbus_get_string(phandle, "phy-mode");
353: if (phy_mode == NULL) {
354: aprint_error(": missing 'phy-mode' property\n");
355: phy_mode = "rgmii";
356: }
357:
1.1 nisimura 358: aprint_naive("\n");
359: aprint_normal(": Gigabit Ethernet Controller\n");
1.6 nisimura 360: aprint_normal_dev(self, "UniPhier %c%c%c%c AVE %d GbE (%d.%d) %s\n",
1.1 nisimura 361: hwimp >> 24, hwimp >> 16, hwimp >> 8, hwimp,
1.6 nisimura 362: sc->sc_model, hwver >> 8, hwver & 0xff, phy_mode);
1.1 nisimura 363: aprint_normal_dev(self, "interrupt on %s\n", intrstr);
364:
365: if (strcmp(phy_mode, "rgmii") == 0)
366: sc->sc_phymode = 0; /* RGMII */
367: else
368: sc->sc_phymode = CFG_MII; /* MII|RMII */
369:
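	/*
	 * Full chip reset: assert global and PHY reset, release the
	 * PHY reset first, then the global reset, and finally mask
	 * all interrupts.
	 */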
370: CSR_WRITE(sc, AVEGR, GR_GRST | GR_PHYRST);
371: DELAY(20);
372: CSR_WRITE(sc, AVEGR, GR_GRST);
373: DELAY(40);
374: CSR_WRITE(sc, AVEGR, 0);
375: DELAY(40);
376: CSR_WRITE(sc, AVEGIMR, 0);
377:
378: /* Read the Ethernet MAC address from the EEPROM. */
379: csr = CSR_READ(sc, AVEMACL);
380: enaddr[0] = csr;
381: enaddr[1] = csr >> 8;
382: enaddr[2] = csr >> 16;
383: enaddr[3] = csr >> 24;
384: csr = CSR_READ(sc, AVEMACH);
385: enaddr[4] = csr;
386: enaddr[5] = csr >> 8;
387: aprint_normal_dev(self,
388: "Ethernet address %s\n", ether_sprintf(enaddr));
389:
1.9 nisimura 390: sc->sc_flowflags = 0;
391: sc->sc_rxc = 0;
392:
1.1 nisimura 393: mii->mii_ifp = ifp;
394: mii->mii_readreg = mii_readreg;
395: mii->mii_writereg = mii_writereg;
396: mii->mii_statchg = mii_statchg;
397: sc->sc_phy_id = MII_PHY_ANY;
398:
399: sc->sc_ethercom.ec_mii = mii;
400: ifmedia_init(ifm, 0, ave_ifmedia_upd, ave_ifmedia_sts);
401: mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id,
402: MII_OFFSET_ANY, MIIF_DOPAUSE);
403: if (LIST_FIRST(&mii->mii_phys) == NULL) {
404: ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
405: ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
406: } else
407: ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
               408: 	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if the user had requested it */
409:
410: strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
411: ifp->if_softc = sc;
412: ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
413: ifp->if_ioctl = ave_ioctl;
414: ifp->if_start = ave_start;
415: ifp->if_watchdog = ave_watchdog;
416: ifp->if_init = ave_init;
417: ifp->if_stop = ave_stop;
418: IFQ_SET_READY(&ifp->if_snd);
419:
420: sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
421: ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
422:
423: if_attach(ifp);
424: if_deferred_start_init(ifp, NULL);
425: ether_ifattach(ifp, enaddr);
426:
427: callout_init(&sc->sc_tick_ch, 0);
428: callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);
429:
430: /*
1.6  nisimura  431: 	 * The HW has a dedicated store to hold the Tx/Rx descriptor arrays,
               432: 	 * so there is no need to build Tx/Rx descriptor control data.
               433: 	 * Go straight to creating DMA maps to hold Tx segments and Rx frames.
1.1 nisimura 434: */
435: for (i = 0; i < AVE_TXQUEUELEN; i++) {
436: if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
437: AVE_NTXSEGS, MCLBYTES, 0, 0,
438: &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1.6 nisimura 439: aprint_error_dev(self,
1.1 nisimura 440: "unable to create tx DMA map %d, error = %d\n",
441: i, error);
442: goto fail_4;
443: }
444: }
445: for (i = 0; i < AVE_NRXDESC; i++) {
446: if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
447: 1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1.6 nisimura 448: aprint_error_dev(self,
1.1 nisimura 449: "unable to create rx DMA map %d, error = %d\n",
450: i, error);
451: goto fail_5;
452: }
453: sc->sc_rxsoft[i].rxs_mbuf = NULL;
454: }
455:
456: if (pmf_device_register(sc->sc_dev, NULL, NULL))
1.6 nisimura 457: pmf_class_network_register(self, ifp);
1.1 nisimura 458: else
1.6 nisimura 459: aprint_error_dev(self,
1.1 nisimura 460: "couldn't establish power handler\n");
461:
1.6 nisimura 462: rnd_attach_source(&sc->rnd_source, device_xname(self),
1.1 nisimura 463: RND_TYPE_NET, RND_FLAG_DEFAULT);
464:
465: return;
466:
467: fail_5:
468: for (i = 0; i < AVE_NRXDESC; i++) {
469: if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
470: bus_dmamap_destroy(sc->sc_dmat,
471: sc->sc_rxsoft[i].rxs_dmamap);
472: }
473: fail_4:
474: for (i = 0; i < AVE_TXQUEUELEN; i++) {
475: if (sc->sc_txsoft[i].txs_dmamap != NULL)
476: bus_dmamap_destroy(sc->sc_dmat,
477: sc->sc_txsoft[i].txs_dmamap);
478: }
479: /* no fail_3|2|1 */
480: fail:
481: fdtbus_intr_disestablish(phandle, sc->sc_ih);
482: bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_mapsize);
483: return;
484: }
485:
486: static void
487: ave_reset(struct ave_softc *sc)
488: {
1.8 nisimura 489: uint32_t csr;
1.1 nisimura 490:
491: CSR_WRITE(sc, AVERXC, 0); /* stop Rx first */
492: CSR_WRITE(sc, AVEDESCC, 0); /* stop Tx/Rx descriptor engine */
1.8 nisimura 493: if (sc->sc_phymode & CFG_MII) {
494: csr = CSR_READ(sc, AVERMIIC);
495: CSR_WRITE(sc, AVERMIIC, csr &~ RMIIC_RST);
496: DELAY(10);
497: CSR_WRITE(sc , AVERMIIC, csr);
498: }
1.1 nisimura 499: CSR_WRITE(sc, AVEGR, GR_RXRST); /* assert RxFIFO reset operation */
500: DELAY(50);
1.8 nisimura 501: CSR_WRITE(sc, AVEGR, 0);
1.6 nisimura 502: CSR_WRITE(sc, AVEGISR, GISR_RXOVF); /* clear OVF condition */
1.1 nisimura 503: }
504:
505: static int
506: ave_init(struct ifnet *ifp)
507: {
508: struct ave_softc *sc = ifp->if_softc;
509: extern const uint8_t etherbroadcastaddr[];
510: const uint8_t promisc[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
511: uint32_t csr;
512: int i;
513:
514: CSR_WRITE(sc, AVEGIMR, 0);
515:
1.11 ! nisimura 516: /* Cancel pending I/O. */
1.1 nisimura 517: ave_stop(ifp, 0);
518:
               519: 	/* make sure the Rx circuit is in a sane and stable state */
520: ave_reset(sc);
521:
522: CSR_WRITE(sc, AVECFG, CFG_FLE | sc->sc_phymode);
523:
524: /* set Tx/Rx descriptor ring base addr offset and total size */
525: CSR_WRITE(sc, AVETXDES, 0U|(sizeof(struct tdes)*AVE_NTXDESC) << 16);
526: CSR_WRITE(sc, AVERXDES0, 0U|(sizeof(struct rdes)*AVE_NRXDESC) << 16);
527:
528: /* set ptr to Tx/Rx descriptor store */
529: sc->sc_txdescs = (void *)((uintptr_t)sc->sc_sh + AVETDB);
530: sc->sc_rxdescs = (void *)((uintptr_t)sc->sc_sh + AVERDB);
1.5 nisimura 531: sc->sc_txd32 = (void *)((uintptr_t)sc->sc_sh + AVE32TDB);
532: sc->sc_rxd32 = (void *)((uintptr_t)sc->sc_sh + AVE32RDB);
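	/* note: the casts above assume sc_sh is a linear, CPU-addressable mapping of the on-chip descriptor store */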
1.1 nisimura 533:
1.11 ! nisimura 534: 	/* build sane Tx descriptors and load Rx descriptors with mbufs */
1.9 nisimura 535: for (i = 0; i < AVE_NTXDESC; i++) {
536: struct tdes *tdes = &sc->sc_txdescs[i];
537: tdes->t2 = tdes->t1 = 0;
538: tdes->t0 = T0_OWN;
539: }
1.1 nisimura 540: for (i = 0; i < AVE_NRXDESC; i++)
541: (void)add_rxbuf(sc, i);
542:
543: /*
544: * address filter usage
545: * 0 - promisc.
546: * 1 - my own MAC station address
547: * 2 - broadcast address
548: */
549: CSR_WRITE(sc, AVEAFEN, 0); /* clear all 17 entries first */
550: ave_write_filt(sc, 0, promisc);
551: ave_write_filt(sc, 1, CLLADDR(ifp->if_sadl));
552: ave_write_filt(sc, 2, etherbroadcastaddr);
553:
554: /* accept multicast frame or run promisc mode */
555: ave_set_rcvfilt(sc);
556:
1.9 nisimura 557: (void)ave_ifmedia_upd(ifp);
558:
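	/* configure IPv4 checksum offload according to the capabilities currently enabled on the interface */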
1.1 nisimura 559: csr = CSR_READ(sc, AVECFG);
560: if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
561: sc->sc_t0csum = 0;
562: csr |= (CFG_CKE | CFG_IPFCKE);
563: } else
564: sc->sc_t0csum = T0_NOCSUM;
565: if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
566: csr |= (CFG_CKE | CFG_IPFCKE);
567: CSR_WRITE(sc, AVECFG, csr);
568:
569: sc->sc_rxc = 1518 | RXC_AFE | RXC_DRPEN;
570: CSR_WRITE(sc, AVERXC, sc->sc_rxc | RXC_EN);
571:
572: /* activate Tx/Rx descriptor engine */
573: CSR_WRITE(sc, AVEDESCC, DESCC_TD | DESCC_RD0);
574:
575: /* enable Rx ring0 timer */
576: csr = CSR_READ(sc, AVEITIRQC) & 0xffff;
577: CSR_WRITE(sc, AVEITIRQC, csr | ITIRQC_R0E | INTMVAL);
578:
579: CSR_WRITE(sc, AVEGIMR, /* PHY interrupt is not maskable */
1.8 nisimura 580: GISR_TXCI | GISR_RXIT | GISR_RXDROP | GISR_RXOVF | GISR_RXF2L);
1.1 nisimura 581:
582: ifp->if_flags |= IFF_RUNNING;
583: ifp->if_flags &= ~IFF_OACTIVE;
584:
585: /* start one second timer */
586: callout_schedule(&sc->sc_tick_ch, hz);
587:
588: return 0;
589: }
590:
591: static void
592: ave_stop(struct ifnet *ifp, int disable)
593: {
594: struct ave_softc *sc = ifp->if_softc;
595:
596: /* Stop the one second clock. */
597: callout_stop(&sc->sc_tick_ch);
598:
599: /* Down the MII. */
600: mii_down(&sc->sc_mii);
601:
602: /* Mark the interface down and cancel the watchdog timer. */
603: ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
604: ifp->if_timer = 0;
605: }
606:
607: static int
608: ave_ifmedia_upd(struct ifnet *ifp)
609: {
610: struct ave_softc *sc = ifp->if_softc;
611: struct ifmedia *ifm = &sc->sc_mii.mii_media;
1.9 nisimura 612: uint32_t txcr, rxcr, csr;
1.1 nisimura 613:
614: txcr = CSR_READ(sc, AVETXC);
615: rxcr = CSR_READ(sc, AVERXC);
616: CSR_WRITE(sc, AVERXC, rxcr &~ RXC_EN); /* stop Rx first */
617:
618: if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
619: ; /* restart AN */
620: ; /* enable AN */
621: ; /* advertise flow control pause */
622: ; /* adv. 1000FDX,100FDX,100HDX,10FDX,10HDX */
623: } else {
1.9 nisimura 624: #if 1 /* XXX not sure to belong here XXX */
1.1 nisimura 625: txcr &= ~(TXC_SPD1000 | TXC_SPD100);
626: rxcr &= ~RXC_USEFDX;
1.9 nisimura 627: if ((sc->sc_phymode & CFG_MII) == 0 /* RGMII model */
628: && IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_1000_T)
1.1 nisimura 629: txcr |= TXC_SPD1000;
630: else if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
631: txcr |= TXC_SPD100;
632: if (ifm->ifm_media & IFM_FDX)
1.9 nisimura 633: rxcr |= RXC_USEFDX;
634:
635: /* adjust LINKSEL when MII/RMII too */
636: if (sc->sc_phymode & CFG_MII) {
               637: 			csr = CSR_READ(sc, AVELINKSEL) &~ LINKSEL_SPD100;
638: if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
639: csr |= LINKSEL_SPD100;
640: CSR_WRITE(sc, AVELINKSEL, csr);
641: }
642: #endif
1.1 nisimura 643: }
644: sc->sc_rxc = rxcr;
645: CSR_WRITE(sc, AVETXC, txcr);
1.4 nisimura 646: CSR_WRITE(sc, AVERXC, rxcr | RXC_EN);
1.1 nisimura 647: return 0;
648: }
649:
650: static void
651: ave_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
652: {
653: struct ave_softc *sc = ifp->if_softc;
654: struct mii_data *mii = &sc->sc_mii;
655:
656: mii_pollstat(mii);
657: ifmr->ifm_status = mii->mii_media_status;
658: ifmr->ifm_active = sc->sc_flowflags |
659: (mii->mii_media_active & ~IFM_ETH_FMASK);
660: }
661:
662: void
663: mii_statchg(struct ifnet *ifp)
664: {
665: struct ave_softc *sc = ifp->if_softc;
666: struct mii_data *mii = &sc->sc_mii;
1.9 nisimura 667: uint32_t txcr, rxcr;
1.1 nisimura 668:
1.9 nisimura 669: /* Get flow control negotiation result. */
1.1 nisimura 670: if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
671: (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
672: sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
673:
674: txcr = CSR_READ(sc, AVETXC);
675: rxcr = CSR_READ(sc, AVERXC);
676: CSR_WRITE(sc, AVERXC, rxcr &~ RXC_EN); /* stop Rx first */
1.7 nisimura 677:
1.9 nisimura 678: /* Adjust 802.3x PAUSE flow control. */
679: txcr &= ~TXC_FCE;
680: rxcr &= ~RXC_FCE;
681: if (mii->mii_media_active & IFM_FDX) {
682: if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
683: txcr |= TXC_FCE;
684: if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
685: rxcr |= RXC_FCE;
1.7 nisimura 686: }
687:
1.1 nisimura 688: sc->sc_rxc = rxcr;
689: CSR_WRITE(sc, AVETXC, txcr);
1.7 nisimura 690: CSR_WRITE(sc, AVERXC, rxcr | RXC_EN);
1.1 nisimura 691:
692: printf("%ctxfe, %crxfe\n",
693: (txcr & TXC_FCE) ? '+' : '-', (rxcr & RXC_FCE) ? '+' : '-');
694: }
695:
696: static void
697: lnkchg(struct ave_softc *sc)
698: {
699: struct ifmediareq ifmr;
700:
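	/* poll the PHY so the MII/media status is refreshed; the ifmr result itself is not used here */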
701: ave_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
702: }
703:
704: static void
705: phy_tick(void *arg)
706: {
707: struct ave_softc *sc = arg;
708: struct mii_data *mii = &sc->sc_mii;
709: int s;
710:
711: s = splnet();
712: mii_tick(mii);
713: splx(s);
714:
715: callout_schedule(&sc->sc_tick_ch, hz);
716: }
717:
718: static int
719: mii_readreg(device_t self, int phy, int reg, uint16_t *val)
720: {
721: struct ave_softc *sc = device_private(self);
722: uint32_t ctrl, stat;
723:
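	/* start a read cycle, then busy-wait (no timeout) until MDIOS_BUSY clears */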
724: CSR_WRITE(sc, AVEMDADR, reg | (sc->sc_phy_id << 8));
725: ctrl = CSR_READ(sc, AVEMDIOC) & ~MDIOC_WR;
726: CSR_WRITE(sc, AVEMDIOC, ctrl | MDIOC_RD);
727: stat = CSR_READ(sc, AVEMDIOS);
728: while (stat & MDIOS_BUSY) {
729: DELAY(10);
730: stat = CSR_READ(sc, AVEMDIOS);
731: }
732: *val = CSR_READ(sc, AVEMDRDD);
733: return 0;
734: }
735:
736: static int
737: mii_writereg(device_t self, int phy, int reg, uint16_t val)
738: {
739: struct ave_softc *sc = device_private(self);
740: uint32_t ctrl, stat;
741:
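	/* latch the write data, start a write cycle, then busy-wait until MDIOS_BUSY clears */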
742: CSR_WRITE(sc, AVEMDADR, reg | (sc->sc_phy_id << 8));
743: CSR_WRITE(sc, AVEMDWRD, val);
744: ctrl = CSR_READ(sc, AVEMDIOC) & ~MDIOC_RD;
745: CSR_WRITE(sc, AVEMDIOC, ctrl | MDIOC_WR);
746: stat = CSR_READ(sc, AVEMDIOS);
747: while (stat & MDIOS_BUSY) {
748: DELAY(10);
749: stat = CSR_READ(sc, AVEMDIOS);
750: }
751: return 0;
752: }
753:
754: static int
755: ave_ioctl(struct ifnet *ifp, u_long cmd, void *data)
756: {
757: struct ave_softc *sc = ifp->if_softc;
758: struct ifreq *ifr = (struct ifreq *)data;
759: struct ifmedia *ifm;
760: int s, error;
761:
762: s = splnet();
763:
764: switch (cmd) {
765: case SIOCSIFMEDIA:
766: /* Flow control requires full-duplex mode. */
767: if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
768: (ifr->ifr_media & IFM_FDX) == 0)
769: ifr->ifr_media &= ~IFM_ETH_FMASK;
770: if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
771: if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
772: /* We can do both TXPAUSE and RXPAUSE. */
773: ifr->ifr_media |=
774: IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
775: }
776: sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
777: }
778: ifm = &sc->sc_mii.mii_media;
779: error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
780: break;
781: default:
782: if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
783: break;
784:
785: error = 0;
786:
787: if (cmd == SIOCSIFCAP)
788: error = (*ifp->if_init)(ifp);
789: if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
790: ;
791: else if (ifp->if_flags & IFF_RUNNING) {
792: /*
793: * Multicast list has changed; set the hardware filter
794: * accordingly.
795: */
796: ave_set_rcvfilt(sc);
797: }
798: break;
799: }
800:
801: splx(s);
802: return error;
803: }
804:
805: static void
806: ave_write_filt(struct ave_softc *sc, int i, const uint8_t *en)
807: {
1.7 nisimura 808: uint32_t macl, mach, n, mskbyte0;
1.1 nisimura 809:
1.7 nisimura 810: /* pick v4mcast or v6mcast length */
811: n = (en[0] == 0x01) ? 3 : (en[0] == 0x33) ? 2 : ETHER_ADDR_LEN;
1.9 nisimura 812: /* slot 0 is reserved for promisc mode */
1.7 nisimura 813: mskbyte0 = (i > 0) ? genmask0(n) : MSKBYTE0;
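	/* genmask0(n) clears the low n bits of the byte mask; the cleared bits apparently select the leading n octets that must match, so slot 0 (full mask) matches any address */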
814:
1.1 nisimura 815: /* set frame address first */
1.8 nisimura 816: macl = mach = 0;
817: macl |= (en[3]<<24) | (en[2]<<16)| (en[1]<<8) | en[0];
818: mach |= (en[5]<<8) | en[4];
1.1 nisimura 819: CSR_WRITE(sc, AVEAFB + (i * 0x40) + 0, macl);
820: CSR_WRITE(sc, AVEAFB + (i * 0x40) + 4, mach);
821: /* set byte mask according to mask length, any of 6, 3, or 2 */
1.7 nisimura 822: CSR_WRITE(sc, AVEAFMSKB + (i * 8) + 0, mskbyte0);
1.1 nisimura 823: CSR_WRITE(sc, AVEAFMSKB + (i * 8) + 4, MSKBYTE1);
824: /* set bit vector mask */
825: CSR_WRITE(sc, AVEAFMSKV + (i * 4), 0xffff);
1.7 nisimura 826: /* use Rx ring 0 anyway */
1.1 nisimura 827: CSR_WRITE(sc, AVEAFRING + (i * 4), 0);
828: /* filter entry enable bit vector */
829: CSR_WRITE(sc, AVEAFEN, CSR_READ(sc, AVEAFEN) | 1U << i);
830: }
831:
832: static void
833: ave_set_rcvfilt(struct ave_softc *sc)
834: {
835: struct ethercom *ec = &sc->sc_ethercom;
836: struct ifnet *ifp = &ec->ec_if;
837: struct ether_multistep step;
838: struct ether_multi *enm;
839: extern const uint8_t ether_ipmulticast_min[];
840: extern const uint8_t ether_ip6multicast_min[];
841: uint32_t csr;
842: int i;
843:
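	/* program each multicast address into its own filter slot starting at 11; fall back to catch-all IPv4/IPv6 multicast entries when the list is too long or contains a range */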
844: sc->sc_rxc &= (RXC_AFE | RXC_EN);
845: CSR_WRITE(sc, AVERXC, sc->sc_rxc); /* stop Rx first */
846:
847: /* turn off all 7 mcast filter entries */
848: csr = CSR_READ(sc, AVEAFEN);
849: CSR_WRITE(sc, AVEAFEN, csr & ~(0177U << 11));
850:
851: ETHER_LOCK(ec);
852: if ((ifp->if_flags & IFF_PROMISC) || ec->ec_multicnt > 7) {
853: ec->ec_flags |= ETHER_F_ALLMULTI;
854: ETHER_UNLOCK(ec);
855: goto update;
856: }
857: ec->ec_flags &= ~ETHER_F_ALLMULTI;
858: ETHER_FIRST_MULTI(step, ec, enm);
859: i = 11; /* slot 11:17 to catch multicast frames */
860: while (enm != NULL) {
861: if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
862: /*
863: * We must listen to a range of multicast addresses.
864: * For now, just accept all multicasts, rather than
865: * trying to set only those filter bits needed to match
866: * the range. (At this time, the only use of address
867: * ranges is for IP multicast routing, for which the
868: * range is big enough to require all bits set.)
869: */
870: ec->ec_flags |= ETHER_F_ALLMULTI;
871: ETHER_UNLOCK(ec);
872: goto update;
873: }
874: KASSERT(i < 17);
875: printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
876: /* use additional MAC addr to accept up to 7 */
877: ave_write_filt(sc, i, enm->enm_addrlo);
878: ETHER_NEXT_MULTI(step, enm);
879: i++;
880: }
881: ETHER_UNLOCK(ec);
882: sc->sc_rxc |= RXC_AFE;
883:
884: update:
885: if (ifp->if_flags & IFF_PROMISC)
886: /* RXC_AFE has been cleared, nothing to do */;
887: else if (ec->ec_flags & ETHER_F_ALLMULTI) {
888: /* slot 11/12 for IPv4/v6 multicast */
889: ave_write_filt(sc, 11, ether_ipmulticast_min);
890: ave_write_filt(sc, 12, ether_ip6multicast_min); /* INET6 */
891: /* clear slot 13-17 */
892: csr = CSR_READ(sc, AVEAFEN);
893: CSR_WRITE(sc, AVEAFEN, csr & ~(037U << 13));
894: sc->sc_rxc |= RXC_AFE;
895: }
896: CSR_WRITE(sc, AVERXC, sc->sc_rxc | RXC_EN);
897: }
898:
899: static void
900: ave_watchdog(struct ifnet *ifp)
901: {
902: struct ave_softc *sc = ifp->if_softc;
903:
904: /*
905: * Since we're not interrupting every packet, sweep
906: * up before we report an error.
907: */
908: txreap(sc);
909:
910: if (sc->sc_txfree != AVE_NTXDESC) {
911: aprint_error_dev(sc->sc_dev,
912: "device timeout (txfree %d txsfree %d txnext %d)\n",
913: sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
914: if_statinc(ifp, if_oerrors);
915:
916: /* Reset the interface. */
917: ave_init(ifp);
918: }
919:
920: ave_start(ifp);
921: }
922:
923: static void
924: ave_start(struct ifnet *ifp)
925: {
926: struct ave_softc *sc = ifp->if_softc;
927: struct mbuf *m0, *m;
928: struct ave_txsoft *txs;
929: bus_dmamap_t dmamap;
930: int error, nexttx, lasttx, ofree, seg;
931: uint32_t tdes0;
932:
933: if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
934: return;
935:
936: /* Remember the previous number of free descriptors. */
937: ofree = sc->sc_txfree;
938:
939: /*
940: * Loop through the send queue, setting up transmit descriptors
941: * until we drain the queue, or use up all available transmit
942: * descriptors.
943: */
944: for (;;) {
945: IFQ_POLL(&ifp->if_snd, m0);
946: if (m0 == NULL)
947: break;
948:
949: if (sc->sc_txsfree < AVE_TXQUEUE_GC) {
950: txreap(sc);
951: if (sc->sc_txsfree == 0)
952: break;
953: }
954: txs = &sc->sc_txsoft[sc->sc_txsnext];
955: dmamap = txs->txs_dmamap;
956:
957: error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
958: BUS_DMA_WRITE | BUS_DMA_NOWAIT);
959: if (error) {
960: if (error == EFBIG) {
961: aprint_error_dev(sc->sc_dev,
962: "Tx packet consumes too many "
963: "DMA segments, dropping...\n");
964: IFQ_DEQUEUE(&ifp->if_snd, m0);
965: m_freem(m0);
966: continue;
967: }
968: /* Short on resources, just stop for now. */
969: break;
970: }
971:
972: if (dmamap->dm_nsegs > sc->sc_txfree) {
973: /*
974: * Not enough free descriptors to transmit this
975: * packet. We haven't committed anything yet,
976: * so just unload the DMA map, put the packet
977: * back on the queue, and punt. Notify the upper
               978: 			 * layer that there are no more slots left.
979: */
980: ifp->if_flags |= IFF_OACTIVE;
981: bus_dmamap_unload(sc->sc_dmat, dmamap);
982: break;
983: }
984:
985: IFQ_DEQUEUE(&ifp->if_snd, m0);
986:
987: /*
988: * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
989: */
990:
991: bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
992: BUS_DMASYNC_PREWRITE);
993:
994: tdes0 = 0; /* to postpone 1st segment T0_OWN write */
995: lasttx = -1;
996: for (nexttx = sc->sc_txnext, seg = 0;
997: seg < dmamap->dm_nsegs;
998: seg++, nexttx = AVE_NEXTTX(nexttx)) {
999: struct tdes *tdes = &sc->sc_txdescs[nexttx];
1000: bus_addr_t paddr = dmamap->dm_segs[seg].ds_addr;
1001: /*
1002: * If this is the first descriptor we're
1003: * enqueueing, don't set the OWN bit just
1004: * yet. That could cause a race condition.
1005: * We'll do it below.
1006: */
1007: tdes->t2 = htole32(BUS_ADDR_HI32(paddr));
1008: tdes->t1 = htole32(BUS_ADDR_LO32(paddr));
1009: tdes->t0 = tdes0 | sc->sc_t0csum
1010: | (dmamap->dm_segs[seg].ds_len & T0_TBS_MASK);
1011: tdes0 = T0_OWN; /* 2nd and other segments */
1012: lasttx = nexttx;
1013: }
1014: /*
              1015: 		 * An outgoing NFS mbuf must be unloaded when its Tx completes.
              1016: 		 * Without T0_IOC the NFS mbuf is left unack'ed for an excessive
              1017: 		 * time and NFS makes no progress until ave_watchdog()
1018: * calls txreap() to reclaim the unack'ed mbuf.
1019: * It's painful to traverse every mbuf chain to determine
1020: * whether someone is waiting for Tx completion.
1021: */
1022: m = m0;
1023: do {
1024: if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
1025: sc->sc_txdescs[lasttx].t0 |= T0_IOC;
1026: break;
1027: }
1028: } while ((m = m->m_next) != NULL);
1029:
1030: /* Write deferred 1st segment T0_OWN at the final stage */
1031: sc->sc_txdescs[lasttx].t0 |= T0_LS;
1032: sc->sc_txdescs[sc->sc_txnext].t0 |= (T0_FS | T0_OWN);
1033: /* AVE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1034: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); */
1035:
1036: /* Tell DMA start transmit */
1037: /* CSR_WRITE(sc, AVEDESCC, DESCC_TD | DESCC_RD0); */
1038:
1039: txs->txs_mbuf = m0;
1040: txs->txs_firstdesc = sc->sc_txnext;
1041: txs->txs_lastdesc = lasttx;
1042: txs->txs_ndesc = dmamap->dm_nsegs;
1043:
1044: sc->sc_txfree -= txs->txs_ndesc;
1045: sc->sc_txnext = nexttx;
1046: sc->sc_txsfree--;
1047: sc->sc_txsnext = AVE_NEXTTXS(sc->sc_txsnext);
1048: /*
1049: * Pass the packet to any BPF listeners.
1050: */
1051: bpf_mtap(ifp, m0, BPF_D_OUT);
1052: }
1053:
1054: if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1055: /* No more slots left; notify upper layer. */
1056: ifp->if_flags |= IFF_OACTIVE;
1057: }
1058: if (sc->sc_txfree != ofree) {
1059: /* Set a watchdog timer in case the chip flakes out. */
1060: ifp->if_timer = 5;
1061: }
1062: }
1063:
1064: static int
1065: ave_intr(void *arg)
1066: {
1067: struct ave_softc *sc = arg;
1068: uint32_t gimr, stat;
1069:
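	/* mask all interrupts while servicing; the saved mask is restored on the way out */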
1070: gimr = CSR_READ(sc, AVEGIMR);
1071: CSR_WRITE(sc, AVEGIMR, 0);
1072: stat = CSR_READ(sc, AVEGISR);
1073: if (stat == 0)
1074: goto done;
1075: if (stat & GISR_PHY) {
1076: lnkchg(sc);
1077: CSR_WRITE(sc, AVEGISR, GISR_PHY);
1078: }
1079: stat &= CSR_READ(sc, AVEGIMR);
1080: if (stat == 0)
1081: goto done;
1082: if (stat & GISR_RXDROP)
1083: CSR_WRITE(sc, AVEGISR, GISR_RXDROP);
1084: if (stat & GISR_RXOVF)
1085: CSR_WRITE(sc, AVEGISR, GISR_RXOVF);
1.8 nisimura 1086: if (stat & GISR_RXF2L)
1087: CSR_WRITE(sc, AVEGISR, GISR_RXF2L);
1.1 nisimura 1088: if (stat & GISR_RXIT) {
1089: rxintr(sc);
1090: CSR_WRITE(sc, AVEGISR, GISR_RXIT);
1091: }
1092: if (stat & GISR_TXCI) {
1093: txreap(sc);
1094: CSR_WRITE(sc, AVEGISR, GISR_TXCI);
1095: }
1096: done:
1097: CSR_WRITE(sc, AVEGIMR, gimr);
1098: return (stat != 0);
1099: }
1100:
1101: static void
1102: txreap(struct ave_softc *sc)
1103: {
1104: struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1105: struct ave_txsoft *txs;
1106: uint32_t txstat;
1107: int i;
1108:
1109: ifp->if_flags &= ~IFF_OACTIVE;
1110:
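	/* reclaim finished Tx jobs, oldest first, stopping at the first descriptor the hardware still owns */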
1111: for (i = sc->sc_txsdirty; sc->sc_txsfree != AVE_TXQUEUELEN;
1112: i = AVE_NEXTTXS(i), sc->sc_txsfree++) {
1113: txs = &sc->sc_txsoft[i];
1114:
1115: /* AVE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
1116: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); */
1117:
1118: txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
1119:
1120: if (txstat & T0_OWN) /* desc is still in use */
1121: break;
1122: /*
              1123: 		 * XXX statistics could be counted here from: XXX
1124: * T0_DONEOK -- completed ok
1125: * T0_OWC -- out of window or collision
1126: * T0_ECOL -- dropped by excess collision
1127: */
1128: if_statinc(ifp, if_opackets);
1129:
1130: sc->sc_txfree += txs->txs_ndesc;
1131: bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1132: 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1133: bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1134: m_freem(txs->txs_mbuf);
1135: txs->txs_mbuf = NULL;
1136: }
1137: sc->sc_txsdirty = i;
1138: if (sc->sc_txsfree == AVE_TXQUEUELEN)
1139: ifp->if_timer = 0;
1140: }
1141:
1142: static void
1143: rxintr(struct ave_softc *sc)
1144: {
1145: struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1146: struct ave_rxsoft *rxs;
1147: struct mbuf *m;
1148: uint32_t rxstat;
1149: int i, len;
1150:
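	/* walk the Rx ring from where we last stopped, handing each completed frame up the stack and recycling its descriptor */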
1151: for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = AVE_NEXTRX(i)) {
1152: rxs = &sc->sc_rxsoft[i];
1153:
1154: /* AVE_CDRXSYNC(sc, i,
1155: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); */
1156:
1157: rxstat = sc->sc_rxdescs[i].r0;
1158: if (rxstat & R0_OWN) /* desc is left empty */
1159: break;
1160:
1161: /* R0_FS | R0_LS must have been marked for this desc */
1162:
1163: bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1164: rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1165:
1166: len = rxstat & R0_FL_MASK;
1167: len -= ETHER_CRC_LEN; /* Trim CRC off */
1168: m = rxs->rxs_mbuf;
1169:
1170: if (add_rxbuf(sc, i) != 0) {
1171: if_statinc(ifp, if_ierrors);
1172: AVE_INIT_RXDESC(sc, i);
1173: bus_dmamap_sync(sc->sc_dmat,
1174: rxs->rxs_dmamap, 0,
1175: rxs->rxs_dmamap->dm_mapsize,
1176: BUS_DMASYNC_PREREAD);
1177: continue;
1178: }
1179:
1180: m_set_rcvif(m, ifp);
1181: m->m_pkthdr.len = m->m_len = len;
1182:
1183: if (rxstat & R0_CSUM) {
1184: uint32_t csum = M_CSUM_IPv4;
1185: if (rxstat & R0_CERR)
1186: csum |= M_CSUM_IPv4_BAD;
1187: m->m_pkthdr.csum_flags |= csum;
1188: }
1189: if_percpuq_enqueue(ifp->if_percpuq, m);
1190: }
1191: sc->sc_rxptr = i;
1192: }
1193:
1194: static int
1195: add_rxbuf(struct ave_softc *sc, int i)
1196: {
1197: struct ave_rxsoft *rxs = &sc->sc_rxsoft[i];
1198: struct mbuf *m;
1199: int error;
1200:
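	/* allocate a fresh mbuf cluster, map it for DMA, and point descriptor 'i' at it */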
1201: MGETHDR(m, M_DONTWAIT, MT_DATA);
1202: if (m == NULL)
1203: return ENOBUFS;
1204:
1205: MCLGET(m, M_DONTWAIT);
1206: if ((m->m_flags & M_EXT) == 0) {
1207: m_freem(m);
1208: return ENOBUFS;
1209: }
1210:
1211: if (rxs->rxs_mbuf != NULL)
1212: bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1213:
1214: rxs->rxs_mbuf = m;
1215:
1216: error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1217: m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1218: if (error) {
1219: aprint_error_dev(sc->sc_dev,
1220: "can't load rx DMA map %d, error = %d\n", i, error);
1221: panic("add_rxbuf");
1222: }
1223:
1224: bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1225: rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1226: AVE_INIT_RXDESC(sc, i);
1227:
1228: return 0;
1229: }