Annotation of src/sys/dev/pci/if_bge.c, Revision 1.175
1.175 ! martin 1: /* $NetBSD: if_bge.c,v 1.174 2010/01/24 23:09:26 martin Exp $ */
1.8 thorpej 2:
1.1 fvdl 3: /*
4: * Copyright (c) 2001 Wind River Systems
5: * Copyright (c) 1997, 1998, 1999, 2001
6: * Bill Paul <wpaul@windriver.com>. All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
17: * must display the following acknowledgement:
18: * This product includes software developed by Bill Paul.
19: * 4. Neither the name of the author nor the names of any co-contributors
20: * may be used to endorse or promote products derived from this software
21: * without specific prior written permission.
22: *
23: * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26: * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33: * THE POSSIBILITY OF SUCH DAMAGE.
34: *
35: * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
36: */
37:
38: /*
1.12 thorpej 39: * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
1.1 fvdl 40: *
1.12 thorpej 41: * NetBSD version by:
42: *
43: * Frank van der Linden <fvdl@wasabisystems.com>
44: * Jason Thorpe <thorpej@wasabisystems.com>
1.32 tron 45: * Jonathan Stone <jonathan@dsg.stanford.edu>
1.12 thorpej 46: *
47: * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
1.1 fvdl 48: * Senior Engineer, Wind River Systems
49: */
50:
51: /*
52: * The Broadcom BCM5700 is based on technology originally developed by
53: * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
55: * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
56: * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
57: * frames, highly configurable RX filtering, and 16 RX and TX queues
58: * (which, along with RX filter rules, can be used for QOS applications).
59: * Other features, such as TCP segmentation, may be available as part
60: * of value-added firmware updates. Unlike the Tigon I and Tigon II,
61: * firmware images can be stored in hardware and need not be compiled
62: * into the driver.
63: *
64: * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
1.33 tsutsui 65: * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
1.1 fvdl 66: *
67: * The BCM5701 is a single-chip solution incorporating both the BCM5700
1.25 jonathan 68: * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
1.1 fvdl 69: * does not support external SSRAM.
70: *
71: * Broadcom also produces a variation of the BCM5700 under the "Altima"
72: * brand name, which is functionally similar but lacks PCI-X support.
73: *
74: * Without external SSRAM, you can only have at most 4 TX rings,
75: * and the use of the mini RX ring is disabled. This seems to imply
76: * that these features are simply not available on the BCM5701. As a
77: * result, this driver does not implement any support for the mini RX
78: * ring.
79: */
1.43 lukem 80:
81: #include <sys/cdefs.h>
1.175 ! martin 82: __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.174 2010/01/24 23:09:26 martin Exp $");
1.1 fvdl 83:
84: #include "vlan.h"
1.148 mlelstv 85: #include "rnd.h"
1.1 fvdl 86:
87: #include <sys/param.h>
88: #include <sys/systm.h>
89: #include <sys/callout.h>
90: #include <sys/sockio.h>
91: #include <sys/mbuf.h>
92: #include <sys/malloc.h>
93: #include <sys/kernel.h>
94: #include <sys/device.h>
95: #include <sys/socket.h>
1.64 jonathan 96: #include <sys/sysctl.h>
1.1 fvdl 97:
98: #include <net/if.h>
99: #include <net/if_dl.h>
100: #include <net/if_media.h>
101: #include <net/if_ether.h>
102:
1.148 mlelstv 103: #if NRND > 0
104: #include <sys/rnd.h>
105: #endif
106:
1.1 fvdl 107: #ifdef INET
108: #include <netinet/in.h>
109: #include <netinet/in_systm.h>
110: #include <netinet/in_var.h>
111: #include <netinet/ip.h>
112: #endif
113:
1.95 jonathan 114: /* Headers for TCP Segmentation Offload (TSO) */
115: #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */
116: #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */
117: #include <netinet/ip.h> /* for struct ip */
118: #include <netinet/tcp.h> /* for struct tcphdr */
119:
120:
1.1 fvdl 121: #include <net/bpf.h>
122:
123: #include <dev/pci/pcireg.h>
124: #include <dev/pci/pcivar.h>
125: #include <dev/pci/pcidevs.h>
126:
127: #include <dev/mii/mii.h>
128: #include <dev/mii/miivar.h>
129: #include <dev/mii/miidevs.h>
130: #include <dev/mii/brgphyreg.h>
131:
132: #include <dev/pci/if_bgereg.h>
1.164 msaitoh 133: #include <dev/pci/if_bgevar.h>
1.1 fvdl 134:
135: #include <uvm/uvm_extern.h>
1.164 msaitoh 136: #include <prop/proplib.h>
1.1 fvdl 137:
1.46 jonathan 138: #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
139:
1.63 jonathan 140:
141: /*
142: * Tunable thresholds for rx-side bge interrupt mitigation.
143: */
144:
145: /*
146: * The pairs of values below were obtained from empirical measurement
147: * on bcm5700 rev B2; they ar designed to give roughly 1 receive
148: * interrupt for every N packets received, where N is, approximately,
149: * the second value (rx_max_bds) in each pair. The values are chosen
150: * such that moving from one pair to the succeeding pair was observed
151: * to roughly halve interrupt rate under sustained input packet load.
152: * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
154: * results in internal wrapping and higher interrupt rates.
155: * The limit of 46 frames was chosen to match NFS workloads.
1.87 perry 156: *
1.63 jonathan 157: * These values also work well on bcm5701, bcm5704C, and (less
158: * tested) bcm5703. On other chipsets, (including the Altima chip
159: * family), the larger values may overflow internal chip limits,
160: * leading to increasing interrupt rates rather than lower interrupt
161: * rates.
162: *
163: * Applications using heavy interrupt mitigation (interrupting every
164: * 32 or 46 frames) in both directions may need to increase the TCP
165: * windowsize to above 131072 bytes (e.g., to 199608 bytes) to sustain
1.87 perry 166: * full link bandwidth, due to ACKs and window updates lingering
1.63 jonathan 167: * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
168: */
/*
 * Interrupt-mitigation setting pairs, ordered from least to most
 * mitigation; each successive entry roughly halves the observed
 * interrupt rate (see the discussion above).
 */
static const struct bge_load_rx_thresh {
	int rx_ticks;		/* RX coalescing timer value */
	int rx_max_bds;		/* max RX buffer descriptors per interrupt */
} bge_rx_threshes[] = {
	{  32,  2 },
	{  50,  4 },
	{ 100,  8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 },
};
180: #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
181:
182: /* XXX patchable; should be sysctl'able */
1.64 jonathan 183: static int bge_auto_thresh = 1;
184: static int bge_rx_thresh_lvl;
185:
1.104 thorpej 186: static int bge_rxthresh_nodenum;
1.1 fvdl 187:
1.170 msaitoh 188: typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
1.151 cegger 189:
1.104 thorpej 190: static int bge_probe(device_t, cfdata_t, void *);
191: static void bge_attach(device_t, device_t, void *);
192: static void bge_release_resources(struct bge_softc *);
193: static void bge_txeof(struct bge_softc *);
194: static void bge_rxeof(struct bge_softc *);
195:
1.172 msaitoh 196: static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
1.170 msaitoh 197: static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
198: static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
199: static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
200: static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
1.151 cegger 201:
1.104 thorpej 202: static void bge_tick(void *);
203: static void bge_stats_update(struct bge_softc *);
1.172 msaitoh 204: static void bge_stats_update_regs(struct bge_softc *);
1.170 msaitoh 205: static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
1.104 thorpej 206:
207: static int bge_intr(void *);
208: static void bge_start(struct ifnet *);
1.126 christos 209: static int bge_ioctl(struct ifnet *, u_long, void *);
1.104 thorpej 210: static int bge_init(struct ifnet *);
1.141 jmcneill 211: static void bge_stop(struct ifnet *, int);
1.104 thorpej 212: static void bge_watchdog(struct ifnet *);
213: static int bge_ifmedia_upd(struct ifnet *);
214: static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
215:
216: static void bge_setmulti(struct bge_softc *);
217:
218: static void bge_handle_events(struct bge_softc *);
219: static int bge_alloc_jumbo_mem(struct bge_softc *);
220: #if 0 /* XXX */
221: static void bge_free_jumbo_mem(struct bge_softc *);
1.1 fvdl 222: #endif
1.104 thorpej 223: static void *bge_jalloc(struct bge_softc *);
1.126 christos 224: static void bge_jfree(struct mbuf *, void *, size_t, void *);
1.104 thorpej 225: static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
226: bus_dmamap_t);
227: static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
228: static int bge_init_rx_ring_std(struct bge_softc *);
229: static void bge_free_rx_ring_std(struct bge_softc *);
230: static int bge_init_rx_ring_jumbo(struct bge_softc *);
231: static void bge_free_rx_ring_jumbo(struct bge_softc *);
232: static void bge_free_tx_ring(struct bge_softc *);
233: static int bge_init_tx_ring(struct bge_softc *);
234:
235: static int bge_chipinit(struct bge_softc *);
236: static int bge_blockinit(struct bge_softc *);
237: static int bge_setpowerstate(struct bge_softc *, int);
1.1 fvdl 238:
1.104 thorpej 239: static void bge_reset(struct bge_softc *);
1.161 msaitoh 240: static void bge_link_upd(struct bge_softc *);
1.95 jonathan 241:
1.1 fvdl 242: #ifdef BGE_DEBUG
243: #define DPRINTF(x) if (bgedebug) printf x
244: #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x
1.95 jonathan 245: #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0)
1.1 fvdl 246: int bgedebug = 0;
1.95 jonathan 247: int bge_tso_debug = 0;
1.172 msaitoh 248: void bge_debug_info(struct bge_softc *);
1.1 fvdl 249: #else
250: #define DPRINTF(x)
251: #define DPRINTFN(n,x)
1.95 jonathan 252: #define BGE_TSO_PRINTF(x)
1.1 fvdl 253: #endif
254:
1.72 thorpej 255: #ifdef BGE_EVENT_COUNTERS
256: #define BGE_EVCNT_INCR(ev) (ev).ev_count++
257: #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val)
258: #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val)
259: #else
260: #define BGE_EVCNT_INCR(ev) /* nothing */
261: #define BGE_EVCNT_ADD(ev, val) /* nothing */
262: #define BGE_EVCNT_UPD(ev, val) /* nothing */
263: #endif
264:
1.158 msaitoh 265: static const struct bge_product {
266: pci_vendor_id_t bp_vendor;
267: pci_product_id_t bp_product;
268: const char *bp_name;
269: } bge_products[] = {
270: /*
271: * The BCM5700 documentation seems to indicate that the hardware
272: * still has the Alteon vendor ID burned into it, though it
273: * should always be overridden by the value in the EEPROM. We'll
274: * check for it anyway.
275: */
276: { PCI_VENDOR_ALTEON,
277: PCI_PRODUCT_ALTEON_BCM5700,
278: "Broadcom BCM5700 Gigabit Ethernet",
279: },
280: { PCI_VENDOR_ALTEON,
281: PCI_PRODUCT_ALTEON_BCM5701,
282: "Broadcom BCM5701 Gigabit Ethernet",
283: },
284: { PCI_VENDOR_ALTIMA,
285: PCI_PRODUCT_ALTIMA_AC1000,
286: "Altima AC1000 Gigabit Ethernet",
287: },
288: { PCI_VENDOR_ALTIMA,
289: PCI_PRODUCT_ALTIMA_AC1001,
290: "Altima AC1001 Gigabit Ethernet",
291: },
292: { PCI_VENDOR_ALTIMA,
293: PCI_PRODUCT_ALTIMA_AC9100,
294: "Altima AC9100 Gigabit Ethernet",
295: },
296: { PCI_VENDOR_BROADCOM,
297: PCI_PRODUCT_BROADCOM_BCM5700,
298: "Broadcom BCM5700 Gigabit Ethernet",
299: },
300: { PCI_VENDOR_BROADCOM,
301: PCI_PRODUCT_BROADCOM_BCM5701,
302: "Broadcom BCM5701 Gigabit Ethernet",
303: },
304: { PCI_VENDOR_BROADCOM,
305: PCI_PRODUCT_BROADCOM_BCM5702,
306: "Broadcom BCM5702 Gigabit Ethernet",
307: },
308: { PCI_VENDOR_BROADCOM,
309: PCI_PRODUCT_BROADCOM_BCM5702X,
310: "Broadcom BCM5702X Gigabit Ethernet" },
311: { PCI_VENDOR_BROADCOM,
312: PCI_PRODUCT_BROADCOM_BCM5703,
313: "Broadcom BCM5703 Gigabit Ethernet",
314: },
315: { PCI_VENDOR_BROADCOM,
316: PCI_PRODUCT_BROADCOM_BCM5703X,
317: "Broadcom BCM5703X Gigabit Ethernet",
318: },
319: { PCI_VENDOR_BROADCOM,
320: PCI_PRODUCT_BROADCOM_BCM5703_ALT,
321: "Broadcom BCM5703 Gigabit Ethernet",
322: },
323: { PCI_VENDOR_BROADCOM,
324: PCI_PRODUCT_BROADCOM_BCM5704C,
325: "Broadcom BCM5704C Dual Gigabit Ethernet",
326: },
327: { PCI_VENDOR_BROADCOM,
328: PCI_PRODUCT_BROADCOM_BCM5704S,
329: "Broadcom BCM5704S Dual Gigabit Ethernet",
330: },
331: { PCI_VENDOR_BROADCOM,
332: PCI_PRODUCT_BROADCOM_BCM5705,
333: "Broadcom BCM5705 Gigabit Ethernet",
334: },
335: { PCI_VENDOR_BROADCOM,
1.172 msaitoh 336: PCI_PRODUCT_BROADCOM_BCM5705F,
337: "Broadcom BCM5705F Gigabit Ethernet",
338: },
339: { PCI_VENDOR_BROADCOM,
1.158 msaitoh 340: PCI_PRODUCT_BROADCOM_BCM5705K,
341: "Broadcom BCM5705K Gigabit Ethernet",
342: },
343: { PCI_VENDOR_BROADCOM,
344: PCI_PRODUCT_BROADCOM_BCM5705M,
345: "Broadcom BCM5705M Gigabit Ethernet",
346: },
347: { PCI_VENDOR_BROADCOM,
348: PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
349: "Broadcom BCM5705M Gigabit Ethernet",
350: },
351: { PCI_VENDOR_BROADCOM,
352: PCI_PRODUCT_BROADCOM_BCM5714,
1.172 msaitoh 353: "Broadcom BCM5714 Gigabit Ethernet",
354: },
355: { PCI_VENDOR_BROADCOM,
356: PCI_PRODUCT_BROADCOM_BCM5714S,
357: "Broadcom BCM5714S Gigabit Ethernet",
1.158 msaitoh 358: },
359: { PCI_VENDOR_BROADCOM,
360: PCI_PRODUCT_BROADCOM_BCM5715,
1.172 msaitoh 361: "Broadcom BCM5715 Gigabit Ethernet",
1.158 msaitoh 362: },
363: { PCI_VENDOR_BROADCOM,
1.172 msaitoh 364: PCI_PRODUCT_BROADCOM_BCM5715S,
365: "Broadcom BCM5715S Gigabit Ethernet",
366: },
367: { PCI_VENDOR_BROADCOM,
368: PCI_PRODUCT_BROADCOM_BCM5717,
369: "Broadcom BCM5717 Gigabit Ethernet",
370: },
371: { PCI_VENDOR_BROADCOM,
372: PCI_PRODUCT_BROADCOM_BCM5718,
373: "Broadcom BCM5718 Gigabit Ethernet",
374: },
375: { PCI_VENDOR_BROADCOM,
376: PCI_PRODUCT_BROADCOM_BCM5720,
377: "Broadcom BCM5720 Gigabit Ethernet",
1.158 msaitoh 378: },
379: { PCI_VENDOR_BROADCOM,
380: PCI_PRODUCT_BROADCOM_BCM5721,
381: "Broadcom BCM5721 Gigabit Ethernet",
382: },
383: { PCI_VENDOR_BROADCOM,
384: PCI_PRODUCT_BROADCOM_BCM5722,
385: "Broadcom BCM5722 Gigabit Ethernet",
386: },
387: { PCI_VENDOR_BROADCOM,
1.172 msaitoh 388: PCI_PRODUCT_BROADCOM_BCM5723,
389: "Broadcom BCM5723 Gigabit Ethernet",
390: },
391: { PCI_VENDOR_BROADCOM,
392: PCI_PRODUCT_BROADCOM_BCM5724,
393: "Broadcom BCM5724 Gigabit Ethernet",
394: },
395: { PCI_VENDOR_BROADCOM,
1.158 msaitoh 396: PCI_PRODUCT_BROADCOM_BCM5750,
397: "Broadcom BCM5750 Gigabit Ethernet",
398: },
399: { PCI_VENDOR_BROADCOM,
400: PCI_PRODUCT_BROADCOM_BCM5750M,
401: "Broadcom BCM5750M Gigabit Ethernet",
402: },
403: { PCI_VENDOR_BROADCOM,
404: PCI_PRODUCT_BROADCOM_BCM5751,
405: "Broadcom BCM5751 Gigabit Ethernet",
406: },
407: { PCI_VENDOR_BROADCOM,
1.172 msaitoh 408: PCI_PRODUCT_BROADCOM_BCM5751F,
409: "Broadcom BCM5751F Gigabit Ethernet",
410: },
411: { PCI_VENDOR_BROADCOM,
1.158 msaitoh 412: PCI_PRODUCT_BROADCOM_BCM5751M,
413: "Broadcom BCM5751M Gigabit Ethernet",
414: },
415: { PCI_VENDOR_BROADCOM,
416: PCI_PRODUCT_BROADCOM_BCM5752,
417: "Broadcom BCM5752 Gigabit Ethernet",
418: },
419: { PCI_VENDOR_BROADCOM,
420: PCI_PRODUCT_BROADCOM_BCM5752M,
421: "Broadcom BCM5752M Gigabit Ethernet",
422: },
423: { PCI_VENDOR_BROADCOM,
424: PCI_PRODUCT_BROADCOM_BCM5753,
425: "Broadcom BCM5753 Gigabit Ethernet",
426: },
427: { PCI_VENDOR_BROADCOM,
1.172 msaitoh 428: PCI_PRODUCT_BROADCOM_BCM5753F,
429: "Broadcom BCM5753F Gigabit Ethernet",
430: },
431: { PCI_VENDOR_BROADCOM,
1.158 msaitoh 432: PCI_PRODUCT_BROADCOM_BCM5753M,
433: "Broadcom BCM5753M Gigabit Ethernet",
434: },
435: { PCI_VENDOR_BROADCOM,
436: PCI_PRODUCT_BROADCOM_BCM5754,
437: "Broadcom BCM5754 Gigabit Ethernet",
438: },
439: { PCI_VENDOR_BROADCOM,
440: PCI_PRODUCT_BROADCOM_BCM5754M,
441: "Broadcom BCM5754M Gigabit Ethernet",
442: },
443: { PCI_VENDOR_BROADCOM,
444: PCI_PRODUCT_BROADCOM_BCM5755,
445: "Broadcom BCM5755 Gigabit Ethernet",
446: },
447: { PCI_VENDOR_BROADCOM,
448: PCI_PRODUCT_BROADCOM_BCM5755M,
449: "Broadcom BCM5755M Gigabit Ethernet",
450: },
1.172 msaitoh 451: { PCI_VENDOR_BROADCOM,
452: PCI_PRODUCT_BROADCOM_BCM5756,
453: "Broadcom BCM5756 Gigabit Ethernet",
454: },
455: { PCI_VENDOR_BROADCOM,
456: PCI_PRODUCT_BROADCOM_BCM5761,
457: "Broadcom BCM5761 Gigabit Ethernet",
458: },
459: { PCI_VENDOR_BROADCOM,
460: PCI_PRODUCT_BROADCOM_BCM5761E,
461: "Broadcom BCM5761E Gigabit Ethernet",
462: },
463: { PCI_VENDOR_BROADCOM,
464: PCI_PRODUCT_BROADCOM_BCM5761S,
465: "Broadcom BCM5761S Gigabit Ethernet",
466: },
467: { PCI_VENDOR_BROADCOM,
468: PCI_PRODUCT_BROADCOM_BCM5761SE,
469: "Broadcom BCM5761SE Gigabit Ethernet",
470: },
471: { PCI_VENDOR_BROADCOM,
472: PCI_PRODUCT_BROADCOM_BCM5764,
473: "Broadcom BCM5764 Gigabit Ethernet",
474: },
1.158 msaitoh 475: { PCI_VENDOR_BROADCOM,
476: PCI_PRODUCT_BROADCOM_BCM5780,
477: "Broadcom BCM5780 Gigabit Ethernet",
478: },
479: { PCI_VENDOR_BROADCOM,
480: PCI_PRODUCT_BROADCOM_BCM5780S,
481: "Broadcom BCM5780S Gigabit Ethernet",
482: },
483: { PCI_VENDOR_BROADCOM,
1.172 msaitoh 484: PCI_PRODUCT_BROADCOM_BCM5781,
485: "Broadcom BCM5781 Gigabit Ethernet",
486: },
487: { PCI_VENDOR_BROADCOM,
1.158 msaitoh 488: PCI_PRODUCT_BROADCOM_BCM5782,
489: "Broadcom BCM5782 Gigabit Ethernet",
490: },
491: { PCI_VENDOR_BROADCOM,
1.172 msaitoh 492: PCI_PRODUCT_BROADCOM_BCM5784M,
493: "BCM5784M NetLink 1000baseT Ethernet",
494: },
495: { PCI_VENDOR_BROADCOM,
1.158 msaitoh 496: PCI_PRODUCT_BROADCOM_BCM5786,
497: "Broadcom BCM5786 Gigabit Ethernet",
498: },
499: { PCI_VENDOR_BROADCOM,
500: PCI_PRODUCT_BROADCOM_BCM5787,
501: "Broadcom BCM5787 Gigabit Ethernet",
502: },
503: { PCI_VENDOR_BROADCOM,
504: PCI_PRODUCT_BROADCOM_BCM5787M,
505: "Broadcom BCM5787M Gigabit Ethernet",
506: },
507: { PCI_VENDOR_BROADCOM,
508: PCI_PRODUCT_BROADCOM_BCM5788,
509: "Broadcom BCM5788 Gigabit Ethernet",
510: },
511: { PCI_VENDOR_BROADCOM,
512: PCI_PRODUCT_BROADCOM_BCM5789,
513: "Broadcom BCM5789 Gigabit Ethernet",
514: },
515: { PCI_VENDOR_BROADCOM,
516: PCI_PRODUCT_BROADCOM_BCM5901,
517: "Broadcom BCM5901 Fast Ethernet",
518: },
519: { PCI_VENDOR_BROADCOM,
520: PCI_PRODUCT_BROADCOM_BCM5901A2,
521: "Broadcom BCM5901A2 Fast Ethernet",
522: },
1.172 msaitoh 523: { PCI_VENDOR_BROADCOM,
524: PCI_PRODUCT_BROADCOM_BCM5903M,
525: "Broadcom BCM5903M Fast Ethernet",
1.158 msaitoh 526: },
527: { PCI_VENDOR_BROADCOM,
528: PCI_PRODUCT_BROADCOM_BCM5906,
529: "Broadcom BCM5906 Fast Ethernet",
530: },
531: { PCI_VENDOR_BROADCOM,
532: PCI_PRODUCT_BROADCOM_BCM5906M,
533: "Broadcom BCM5906M Fast Ethernet",
534: },
1.172 msaitoh 535: { PCI_VENDOR_BROADCOM,
536: PCI_PRODUCT_BROADCOM_BCM57760,
537: "Broadcom BCM57760 Fast Ethernet",
538: },
539: { PCI_VENDOR_BROADCOM,
540: PCI_PRODUCT_BROADCOM_BCM57761,
541: "Broadcom BCM57761 Fast Ethernet",
542: },
543: { PCI_VENDOR_BROADCOM,
544: PCI_PRODUCT_BROADCOM_BCM57765,
545: "Broadcom BCM57765 Fast Ethernet",
546: },
547: { PCI_VENDOR_BROADCOM,
548: PCI_PRODUCT_BROADCOM_BCM57780,
549: "Broadcom BCM57780 Fast Ethernet",
550: },
551: { PCI_VENDOR_BROADCOM,
552: PCI_PRODUCT_BROADCOM_BCM57781,
553: "Broadcom BCM57781 Fast Ethernet",
554: },
555: { PCI_VENDOR_BROADCOM,
556: PCI_PRODUCT_BROADCOM_BCM57785,
557: "Broadcom BCM57785 Fast Ethernet",
558: },
559: { PCI_VENDOR_BROADCOM,
560: PCI_PRODUCT_BROADCOM_BCM57788,
561: "Broadcom BCM57788 Fast Ethernet",
562: },
563: { PCI_VENDOR_BROADCOM,
564: PCI_PRODUCT_BROADCOM_BCM57790,
565: "Broadcom BCM57790 Fast Ethernet",
566: },
567: { PCI_VENDOR_BROADCOM,
568: PCI_PRODUCT_BROADCOM_BCM57791,
569: "Broadcom BCM57791 Fast Ethernet",
570: },
571: { PCI_VENDOR_BROADCOM,
572: PCI_PRODUCT_BROADCOM_BCM57795,
573: "Broadcom BCM57795 Fast Ethernet",
574: },
575: { PCI_VENDOR_SCHNEIDERKOCH,
576: PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
577: "SysKonnect SK-9Dx1 Gigabit Ethernet",
578: },
579: { PCI_VENDOR_3COM,
580: PCI_PRODUCT_3COM_3C996,
581: "3Com 3c996 Gigabit Ethernet",
582: },
1.158 msaitoh 583: { 0,
584: 0,
585: NULL },
586: };
587:
1.95 jonathan 588: /*
589: * XXX: how to handle variants based on 5750 and derivatives:
1.107 blymn 590: * 5750 5751, 5721, possibly 5714, 5752, and 5708?, which
1.95 jonathan 591: * in general behave like a 5705, except with additional quirks.
592: * This driver's current handling of the 5721 is wrong;
593: * how we map ASIC revision to "quirks" needs more thought.
594: * (defined here until the thought is done).
595: */
1.172 msaitoh 596: #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_5700_FAMILY)
597: #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_5714_FAMILY)
598: #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_5705_PLUS)
599: #define BGE_IS_5750_OR_BEYOND(sc) ((sc)->bge_flags & BGE_5750_PLUS)
600: #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_5755_PLUS)
601: #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_JUMBO_CAPABLE)
1.166 msaitoh 602:
/*
 * Table mapping chip-ID revision codes to printable stepping names
 * for known silicon revisions; terminated by a { 0, NULL } entry.
 */
static const struct bge_revision {
	uint32_t		br_chipid;	/* full chip-ID value */
	const char		*br_name;	/* printable stepping name */
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	/* This is treated like a BCM5700 Bx */
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	/* Terminator. */
	{ 0, NULL }
};
670:
671: /*
672: * Some defaults for major revisions, so that newer steppings
673: * that we don't know about have a shot at working.
674: */
/* Fallback names keyed by ASIC (major) revision only; terminated by { 0, NULL }. */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },

	/* Terminator. */
	{ 0, NULL }
};
1.17 thorpej 699:
1.138 joerg 700: CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc),
1.22 thorpej 701: bge_probe, bge_attach, NULL, NULL);
1.1 fvdl 702:
1.170 msaitoh 703: static uint32_t
1.104 thorpej 704: bge_readmem_ind(struct bge_softc *sc, int off)
1.1 fvdl 705: {
706: pcireg_t val;
707:
1.141 jmcneill 708: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
709: val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
1.1 fvdl 710: return val;
711: }
712:
/*
 * Write a word of chip-internal memory indirectly through PCI
 * configuration space: load the memory window base register with
 * 'off', then store 'val' through the window data register.
 */
static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
}
719:
#ifdef notdef
/*
 * Read a chip register indirectly through PCI configuration space
 * (register base/data window).  Currently compiled out ("notdef").
 */
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
}
#endif
728:
/*
 * Write a chip register indirectly through PCI configuration space:
 * select the register via BGE_PCI_REG_BASEADDR, then store 'val'
 * through BGE_PCI_REG_DATA.
 */
static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
}
735:
/*
 * Write 'val' directly to chip offset 'off' through the memory-mapped
 * register BAR (no configuration-space indirection).
 */
static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}
741:
/*
 * Write to a host-to-chip mailbox register.  On the BCM5906 the
 * mailboxes live at a different offset (BGE_LPMBX_* vs BGE_MBX_*),
 * so rebase 'off' by that distance before the write.
 */
static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}
750:
/*
 * Read one byte from the on-chip NVRAM interface into *dest.
 * Acquires the NVRAM software-arbitration lock, enables NVRAM
 * access, issues a 32-bit read of the word containing 'addr',
 * extracts the addressed byte, then restores the previous access
 * setting and releases the lock.
 *
 * Returns 0 on success, 1 if lock arbitration or the read command
 * times out.
 */
static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return 1;

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	/* Word-align the address and start the read command. */
	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	/* Data register is byte-swapped; select the byte for 'addr'. */
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return 0;
}
800:
801: /*
802: * Read a sequence of bytes from NVRAM.
803: */
804: static int
1.170 msaitoh 805: bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
1.151 cegger 806: {
807: int err = 0, i;
1.170 msaitoh 808: uint8_t byte = 0;
1.151 cegger 809:
810: if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
1.170 msaitoh 811: return 1;
1.151 cegger 812:
813: for (i = 0; i < cnt; i++) {
814: err = bge_nvram_getbyte(sc, off + i, &byte);
815: if (err)
816: break;
817: *(dest + i) = byte;
818: }
819:
820: return (err ? 1 : 0);
821: }
822:
1.1 fvdl 823: /*
824: * Read a byte of data stored in the EEPROM at address 'addr.' The
825: * BCM570x supports both the traditional bitbang interface and an
826: * auto access interface for reading the EEPROM. We use the auto
827: * access method.
828: */
1.170 msaitoh 829: static uint8_t
830: bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
1.1 fvdl 831: {
832: int i;
1.170 msaitoh 833: uint32_t byte = 0;
1.1 fvdl 834:
835: /*
836: * Enable use of auto EEPROM access so we can avoid
837: * having to use the bitbang method.
838: */
839: BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
840:
841: /* Reset the EEPROM, load the clock period. */
842: CSR_WRITE_4(sc, BGE_EE_ADDR,
1.161 msaitoh 843: BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
1.1 fvdl 844: DELAY(20);
845:
846: /* Issue the read EEPROM command. */
847: CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
848:
849: /* Wait for completion */
1.170 msaitoh 850: for (i = 0; i < BGE_TIMEOUT * 10; i++) {
1.1 fvdl 851: DELAY(10);
852: if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
853: break;
854: }
855:
1.172 msaitoh 856: if (i == BGE_TIMEOUT * 10) {
1.138 joerg 857: aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
1.170 msaitoh 858: return 0;
1.1 fvdl 859: }
860:
861: /* Get result. */
862: byte = CSR_READ_4(sc, BGE_EE_DATA);
863:
864: *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
865:
1.170 msaitoh 866: return 0;
1.1 fvdl 867: }
868:
869: /*
870: * Read a sequence of bytes from the EEPROM.
871: */
1.104 thorpej 872: static int
1.126 christos 873: bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
1.1 fvdl 874: {
875: int err = 0, i;
1.170 msaitoh 876: uint8_t byte = 0;
1.126 christos 877: char *dest = destv;
1.1 fvdl 878:
879: for (i = 0; i < cnt; i++) {
880: err = bge_eeprom_getbyte(sc, off + i, &byte);
881: if (err)
882: break;
883: *(dest + i) = byte;
884: }
885:
1.158 msaitoh 886: return (err ? 1 : 0);
1.1 fvdl 887: }
888:
/*
 * MII read callback: read PHY register 'reg' through the chip's MI
 * communication register.  Returns the 16-bit register value, or 0
 * on PHY-address mismatch, timeout, or read failure.
 */
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t val;
	uint32_t autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chips revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return 0;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		/* Temporarily disable autopolling around the manual access. */
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	/* Kick off the read, then poll for the BUSY bit to clear. */
	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	/* Re-read MI_COMM to pick up the final data/status word. */
	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	/* Restore autopolling if we disabled it above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}
947:
1.104 thorpej 948: static void
949: bge_miibus_writereg(device_t dev, int phy, int reg, int val)
1.1 fvdl 950: {
1.138 joerg 951: struct bge_softc *sc = device_private(dev);
1.172 msaitoh 952: uint32_t autopoll;
1.29 itojun 953: int i;
1.1 fvdl 954:
1.151 cegger 955: if (phy!=1) {
956: return;
957: }
958:
959: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
960: (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) {
961: return;
962: }
963:
1.161 msaitoh 964: /* Reading with autopolling on may trigger PCI errors */
1.172 msaitoh 965: autopoll = CSR_READ_4(sc, BGE_MI_MODE);
966: if (autopoll & BGE_MIMODE_AUTOPOLL) {
1.25 jonathan 967: delay(40);
1.161 msaitoh 968: BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
1.172 msaitoh 969: BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1.25 jonathan 970: delay(10); /* 40 usec is supposed to be adequate */
971: }
1.29 itojun 972:
1.161 msaitoh 973: CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
974: BGE_MIPHY(phy) | BGE_MIREG(reg)|val);
1.1 fvdl 975:
976: for (i = 0; i < BGE_TIMEOUT; i++) {
1.151 cegger 977: delay(10);
978: if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
979: delay(5);
980: CSR_READ_4(sc, BGE_MI_COMM);
1.1 fvdl 981: break;
1.151 cegger 982: }
1.1 fvdl 983: }
984:
1.172 msaitoh 985: if (autopoll & BGE_MIMODE_AUTOPOLL) {
1.161 msaitoh 986: BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1.172 msaitoh 987: BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1.25 jonathan 988: delay(40);
989: }
1.29 itojun 990:
1.138 joerg 991: if (i == BGE_TIMEOUT)
992: aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
1.1 fvdl 993: }
994:
/*
 * MII status-change callback: reprogram the MAC's port mode, duplex
 * and 802.3x flow-control enables to match the negotiated media.
 */
static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Select GMII port mode for gigabit media, MII otherwise. */
	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	/* Half/full duplex according to the negotiated media. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);

	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
}
1035:
1036: /*
1.63 jonathan 1037: * Update rx threshold levels to values in a particular slot
1038: * of the interrupt-mitigation table bge_rx_threshes.
1039: */
1.104 thorpej 1040: static void
1.63 jonathan 1041: bge_set_thresh(struct ifnet *ifp, int lvl)
1042: {
1043: struct bge_softc *sc = ifp->if_softc;
1044: int s;
1045:
1046: /* For now, just save the new Rx-intr thresholds and record
1047: * that a threshold update is pending. Updating the hardware
1048: * registers here (even at splhigh()) is observed to
1049: * occasionaly cause glitches where Rx-interrupts are not
1.68 keihan 1050: * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
1.63 jonathan 1051: */
1052: s = splnet();
1053: sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
1054: sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
1055: sc->bge_pending_rxintr_change = 1;
1056: splx(s);
1057:
1058: return;
1059: }
1060:
1061:
1062: /*
1063: * Update Rx thresholds of all bge devices
1064: */
1.104 thorpej 1065: static void
1.63 jonathan 1066: bge_update_all_threshes(int lvl)
1067: {
1068: struct ifnet *ifp;
1069: const char * const namebuf = "bge";
1070: int namelen;
1071:
1072: if (lvl < 0)
1073: lvl = 0;
1.170 msaitoh 1074: else if (lvl >= NBGE_RX_THRESH)
1.63 jonathan 1075: lvl = NBGE_RX_THRESH - 1;
1.87 perry 1076:
1.63 jonathan 1077: namelen = strlen(namebuf);
1078: /*
1079: * Now search all the interfaces for this name/number
1080: */
1.81 matt 1081: IFNET_FOREACH(ifp) {
1.67 jonathan 1082: if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
1.63 jonathan 1083: continue;
1084: /* We got a match: update if doing auto-threshold-tuning */
1085: if (bge_auto_thresh)
1.67 jonathan 1086: bge_set_thresh(ifp, lvl);
1.63 jonathan 1087: }
1088: }
1089:
1090: /*
1.1 fvdl 1091: * Handle events that have triggered interrupts.
1092: */
static void
bge_handle_events(struct bge_softc *sc)
{
	/* Intentionally empty: placeholder for event processing. */
}
1099:
1100: /*
1101: * Memory management for jumbo frames.
1102: */
1103:
/*
 * Allocate one physically contiguous DMA chunk of BGE_JMEM bytes and
 * carve it into BGE_JSLOTS jumbo buffers of BGE_JLEN bytes each,
 * tracked via the jfree/jinuse slot lists.
 * Returns 0 on success, ENOBUFS on any allocation failure.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	/* 'state' records how far setup progressed, for unwinding below. */
	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind in reverse order of setup; each case falls through. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}
1194:
1195: /*
1196: * Allocate a jumbo buffer.
1197: */
1.104 thorpej 1198: static void *
1199: bge_jalloc(struct bge_softc *sc)
1.1 fvdl 1200: {
1201: struct bge_jpool_entry *entry;
1202:
1203: entry = SLIST_FIRST(&sc->bge_jfree_listhead);
1204:
1205: if (entry == NULL) {
1.138 joerg 1206: aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
1.170 msaitoh 1207: return NULL;
1.1 fvdl 1208: }
1209:
1210: SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
1211: SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
1.158 msaitoh 1212: return (sc->bge_cdata.bge_jslots[entry->slot]);
1.1 fvdl 1213: }
1214:
1215: /*
1216: * Release a jumbo buffer.
1217: */
/*
 * External-storage free callback installed by MEXTADD() in
 * bge_newbuf_jumbo(): return the jumbo slot to the free list and,
 * when called for an mbuf, return the mbuf to its pool cache.
 */
static void
bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((char *)buf
	    - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	/*
	 * NOTE(review): the entry popped here is an arbitrary in-use
	 * entry, not necessarily the one that tracked this buffer;
	 * its slot number is simply overwritten with 'i'.
	 */
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}
1251:
1252:
1253: /*
 * Initialize a standard receive ring descriptor.
1255: */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	/* Create a DMA map for this slot on first use. */
	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		/* Allocate a fresh mbuf and attach a cluster. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return ENOBUFS;
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	} else {
		/* Recycle the caller's mbuf; reset it to a full cluster. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Align the IP header unless this chip has the Rx alignment bug. */
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
	    BUS_DMA_READ|BUS_DMA_NOWAIT))
		return ENOBUFS;
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Fill in the ring descriptor and sync it out to the chip. */
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}
1312:
1313: /*
1314: * Initialize a jumbo receive ring descriptor. This allocates
1315: * a jumbo buffer from the pool managed internally by the driver.
1316: */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	void *buf = NULL;

	if (m == NULL) {

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			aprint_error_dev(sc->bge_dev,
			    "jumbo allocation failed -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		/* Mark the external storage writable. */
		m_new->m_flags |= M_EXT_RW;
	} else {
		/* Recycle the caller's mbuf and its existing jumbo buffer. */
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	/* Align the IP header unless this chip has the Rx alignment bug. */
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
	    BUS_DMASYNC_PREREAD);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	/* Sync the descriptor out to the chip. */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}
1371:
1372: /*
1373: * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
1375: * 256 ring entries and hope that our CPU is fast enough to keep up with
1376: * the NIC.
1377: */
1.104 thorpej 1378: static int
1379: bge_init_rx_ring_std(struct bge_softc *sc)
1.1 fvdl 1380: {
1381: int i;
1382:
1383: if (sc->bge_flags & BGE_RXRING_VALID)
1384: return 0;
1385:
1386: for (i = 0; i < BGE_SSLOTS; i++) {
1387: if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
1.170 msaitoh 1388: return ENOBUFS;
1.1 fvdl 1389: }
1390:
1391: sc->bge_std = i - 1;
1.151 cegger 1392: bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1.1 fvdl 1393:
1394: sc->bge_flags |= BGE_RXRING_VALID;
1395:
1.170 msaitoh 1396: return 0;
1.1 fvdl 1397: }
1398:
1.104 thorpej 1399: static void
1400: bge_free_rx_ring_std(struct bge_softc *sc)
1.1 fvdl 1401: {
1402: int i;
1403:
1404: if (!(sc->bge_flags & BGE_RXRING_VALID))
1405: return;
1406:
1407: for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1408: if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1409: m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1410: sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1.87 perry 1411: bus_dmamap_destroy(sc->bge_dmatag,
1.1 fvdl 1412: sc->bge_cdata.bge_rx_std_map[i]);
1413: }
1414: memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
1415: sizeof(struct bge_rx_bd));
1416: }
1417:
1418: sc->bge_flags &= ~BGE_RXRING_VALID;
1419: }
1420:
1.104 thorpej 1421: static int
1422: bge_init_rx_ring_jumbo(struct bge_softc *sc)
1.1 fvdl 1423: {
1424: int i;
1.34 jonathan 1425: volatile struct bge_rcb *rcb;
1.1 fvdl 1426:
1.59 martin 1427: if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
1428: return 0;
1429:
1.1 fvdl 1430: for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1431: if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1.170 msaitoh 1432: return ENOBUFS;
1.1 fvdl 1433: };
1434:
1435: sc->bge_jumbo = i - 1;
1.59 martin 1436: sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
1.1 fvdl 1437:
1438: rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1.34 jonathan 1439: rcb->bge_maxlen_flags = 0;
1440: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1.1 fvdl 1441:
1.151 cegger 1442: bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1.1 fvdl 1443:
1.170 msaitoh 1444: return 0;
1.1 fvdl 1445: }
1446:
1.104 thorpej 1447: static void
1448: bge_free_rx_ring_jumbo(struct bge_softc *sc)
1.1 fvdl 1449: {
1450: int i;
1451:
1452: if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
1453: return;
1454:
1455: for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1456: if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1457: m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1458: sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1459: }
1460: memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
1461: sizeof(struct bge_rx_bd));
1462: }
1463:
1464: sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
1465: }
1466:
1.104 thorpej 1467: static void
1468: bge_free_tx_ring(struct bge_softc *sc)
1.1 fvdl 1469: {
1470: int i, freed;
1471: struct txdmamap_pool_entry *dma;
1472:
1473: if (!(sc->bge_flags & BGE_TXRING_VALID))
1474: return;
1475:
1476: freed = 0;
1477:
1478: for (i = 0; i < BGE_TX_RING_CNT; i++) {
1479: if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1480: freed++;
1481: m_freem(sc->bge_cdata.bge_tx_chain[i]);
1482: sc->bge_cdata.bge_tx_chain[i] = NULL;
1483: SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1484: link);
1485: sc->txdma[i] = 0;
1486: }
1487: memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
1488: sizeof(struct bge_tx_bd));
1489: }
1490:
1491: while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1492: SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1493: bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1494: free(dma, M_DEVBUF);
1495: }
1496:
1497: sc->bge_flags &= ~BGE_TXRING_VALID;
1498: }
1499:
/*
 * Initialize the transmit ring: reset producer/consumer state (with
 * the 5700_BX double-write errata workaround) and pre-create a pool
 * of Tx DMA maps.  Returns 0, ENOBUFS, or ENOMEM.
 */
static int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	/* Pre-allocate BGE_RSLOTS Tx DMA maps onto the free list. */
	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return ENOBUFS;
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "can't alloc txdmamap_pool_entry\n");
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return ENOMEM;
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return 0;
}
1549:
/*
 * Program the chip's multicast hash filter (BGE_MAR0..BGE_MAR3) from
 * the interface's multicast list.  Promiscuous mode or any multicast
 * address range forces all-multicast (every hash bit set).
 */
static void
bge_setmulti(struct bge_softc *sc)
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	uint32_t h;
	int i;

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 7 least-significant bits. */
		h &= 0x7f;

		/* Bits 5-6 pick the register, bits 0-4 the bit within it. */
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;

 setit:
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
1.133 markd 1599:
/*
 * Every combination of the BGE_MODECTL byte/word swap control bits.
 * NOTE(review): presumably a debug aid for probing DMA descriptor
 * endianness behaviour (indexed via bge_swapindex); its use is not
 * visible in this part of the file -- confirm before removing.
 */
const int bge_swapbits[] = {
	0,
	BGE_MODECTL_BYTESWAP_DATA,
	BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
};

/* Current index into bge_swapbits[]. */
int bge_swapindex = 0;
1.1 fvdl 1630:
1.158 msaitoh 1631: /*
1632: * Do endian, PCI and DMA initialization. Also check the on-board ROM
1633: * self-test results.
1634: */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;


	/* Set endianness before we access any non-PCI registers. */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Set power state to D0. */
	bge_setpowerstate(sc, 0);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	/* Likewise clear the status block in NIC memory. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	/* Set up the PCI DMA control register. */
	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
	if (sc->bge_flags & BGE_PCIE) {
		/* Read watermark not used, 128 bytes for write. */
		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_PCIX) {
		DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n",
		    device_xname(sc->bge_dev)));
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= (0x02 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x02 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |=
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= (0x03 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x03 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
		    device_xname(sc->bge_dev)));
		/*
		 * NOTE(review): plain '=' here discards the READ_CMD/
		 * WRITE_CMD bits set above, unlike the other branches
		 * which use '|='.  Confirm this asymmetry is intended.
		 */
		dma_rw_ctl = (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
	    BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM | BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * BCM5701 B5 have a bug causing data corruption when using
	 * 64-bit DMA reads, which can be terminated early and then
	 * completed later as 32-bit accesses, in combination with
	 * certain bridges.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
	    PCI_COMMAND_INVALIDATE_ENABLE);

#ifdef __brokenalpha__
	/*
	 * Must insure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return 0;
}
1.16 thorpej 1779:
1.158 msaitoh 1780: static int
1781: bge_blockinit(struct bge_softc *sc)
1782: {
1783: volatile struct bge_rcb *rcb;
1784: bus_size_t rcb_addr;
1785: int i;
1786: struct ifnet *ifp = &sc->ethercom.ec_if;
1787: bge_hostaddr taddr;
1.170 msaitoh 1788: uint32_t val;
1.16 thorpej 1789:
1.158 msaitoh 1790: /*
1791: * Initialize the memory window pointer register so that
1792: * we can access the first 32K of internal NIC RAM. This will
1793: * allow us to set up the TX send ring RCBs and the RX return
1794: * ring RCBs, plus other things which live in NIC memory.
1795: */
1.55 pooka 1796:
1.158 msaitoh 1797: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
1.120 tsutsui 1798:
1.158 msaitoh 1799: /* Configure mbuf memory pool */
1.172 msaitoh 1800: if (BGE_IS_5700_FAMILY(sc)) {
1801: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1802: BGE_BUFFPOOL_1);
1803:
1804: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1805: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1806: else
1807: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1.40 fvdl 1808:
1.158 msaitoh 1809: /* Configure DMA resource pool */
1810: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1811: BGE_DMA_DESCRIPTORS);
1812: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1813: }
1.40 fvdl 1814:
1.158 msaitoh 1815: /* Configure mbuf pool watermarks */
1816: #ifdef ORIG_WPAUL_VALUES
1817: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
1818: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
1819: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
1820: #else
1.49 fvdl 1821:
1.158 msaitoh 1822: /* new broadcom docs strongly recommend these: */
1.172 msaitoh 1823: if (!BGE_IS_5705_PLUS(sc)) {
1.158 msaitoh 1824: if (ifp->if_mtu > ETHER_MAX_LEN) {
1825: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1826: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1827: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1828: } else {
1829: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304);
1830: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152);
1831: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380);
1832: }
1833: } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1834: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1835: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1836: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1837: } else {
1838: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1839: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1840: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1841: }
1842: #endif
1.25 jonathan 1843:
1.158 msaitoh 1844: /* Configure DMA resource watermarks */
1845: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1846: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1.51 fvdl 1847:
1.158 msaitoh 1848: /* Enable buffer manager */
1.172 msaitoh 1849: CSR_WRITE_4(sc, BGE_BMAN_MODE,
1850: BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1.44 hannken 1851:
1.172 msaitoh 1852: /* Poll for buffer manager start indication */
1853: for (i = 0; i < BGE_TIMEOUT * 2; i++) {
1854: if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1855: break;
1856: DELAY(10);
1857: }
1.51 fvdl 1858:
1.172 msaitoh 1859: if (i == BGE_TIMEOUT * 2) {
1860: aprint_error_dev(sc->bge_dev,
1861: "buffer manager failed to start\n");
1862: return ENXIO;
1.158 msaitoh 1863: }
1.51 fvdl 1864:
1.158 msaitoh 1865: /* Enable flow-through queues */
1866: CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1867: CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1.76 cube 1868:
1.158 msaitoh 1869: /* Wait until queue initialization is complete */
1.172 msaitoh 1870: for (i = 0; i < BGE_TIMEOUT * 2; i++) {
1.158 msaitoh 1871: if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1872: break;
1873: DELAY(10);
1874: }
1.76 cube 1875:
1.172 msaitoh 1876: if (i == BGE_TIMEOUT * 2) {
1.158 msaitoh 1877: aprint_error_dev(sc->bge_dev,
1878: "flow-through queue init failed\n");
1.170 msaitoh 1879: return ENXIO;
1.158 msaitoh 1880: }
1.92 gavan 1881:
1.158 msaitoh 1882: /* Initialize the standard RX ring control block */
1883: rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1.172 msaitoh 1884: BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1885: if (BGE_IS_5705_PLUS(sc))
1.158 msaitoh 1886: rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1887: else
1888: rcb->bge_maxlen_flags =
1889: BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1.172 msaitoh 1890: rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1.158 msaitoh 1891: CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1892: CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1893: CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1894: CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1.119 tsutsui 1895:
1.158 msaitoh 1896: /*
1897: * Initialize the jumbo RX ring control block
1898: * We set the 'ring disabled' bit in the flags
1899: * field until we're actually ready to start
1900: * using this ring (i.e. once we set the MTU
1901: * high enough to require it).
1902: */
1.166 msaitoh 1903: if (BGE_IS_JUMBO_CAPABLE(sc)) {
1.158 msaitoh 1904: rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1.172 msaitoh 1905: BGE_HOSTADDR(rcb->bge_hostaddr,
1.158 msaitoh 1906: BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1907: rcb->bge_maxlen_flags =
1908: BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1909: BGE_RCB_FLAG_RING_DISABLED);
1.172 msaitoh 1910: rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1.158 msaitoh 1911: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1912: rcb->bge_hostaddr.bge_addr_hi);
1913: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1914: rcb->bge_hostaddr.bge_addr_lo);
1915: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1916: rcb->bge_maxlen_flags);
1917: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1.149 sborrill 1918:
1.158 msaitoh 1919: /* Set up dummy disabled mini ring RCB */
1920: rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1921: rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1922: BGE_RCB_FLAG_RING_DISABLED);
1923: CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1924: rcb->bge_maxlen_flags);
1.133 markd 1925:
1.158 msaitoh 1926: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1927: offsetof(struct bge_ring_data, bge_info),
1928: sizeof (struct bge_gib),
1929: BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1930: }
1.133 markd 1931:
1.158 msaitoh 1932: /*
1933: * Set the BD ring replenish thresholds. The recommended
1934: * values are 1/8th the number of descriptors allocated to
1935: * each ring.
1936: */
1937: i = BGE_STD_RX_RING_CNT / 8;
1.133 markd 1938:
1.158 msaitoh 1939: /*
1940: * Use a value of 8 for the following chips to workaround HW errata.
1941: * Some of these chips have been added based on empirical
1942: * evidence (they don't work unless this is done).
1943: */
1.172 msaitoh 1944: if (BGE_IS_5705_PLUS(sc))
1.158 msaitoh 1945: i = 8;
1.16 thorpej 1946:
1.158 msaitoh 1947: CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i);
1.161 msaitoh 1948: CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
1.157 msaitoh 1949:
1.172 msaitoh 1950: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
1951: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765) {
1952: CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
1953: CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
1954: }
1955:
1.158 msaitoh 1956: /*
1957: * Disable all unused send rings by setting the 'ring disabled'
1958: * bit in the flags field of all the TX send ring control blocks.
1959: * These are located in NIC memory.
1960: */
1961: rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1962: for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1963: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1964: BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1965: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1966: rcb_addr += sizeof(struct bge_rcb);
1967: }
1.157 msaitoh 1968:
1.158 msaitoh 1969: /* Configure TX RCB 0 (we use only the first ring) */
1970: rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1.172 msaitoh 1971: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1.158 msaitoh 1972: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1973: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1974: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1975: BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1.172 msaitoh 1976: if (BGE_IS_5700_FAMILY(sc))
1.158 msaitoh 1977: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1978: BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1.157 msaitoh 1979:
1.158 msaitoh 1980: /* Disable all unused RX return rings */
1981: rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1982: for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1983: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1984: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1985: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1.172 msaitoh 1986: BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1987: BGE_RCB_FLAG_RING_DISABLED));
1.158 msaitoh 1988: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1989: bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1.170 msaitoh 1990: (i * (sizeof(uint64_t))), 0);
1.158 msaitoh 1991: rcb_addr += sizeof(struct bge_rcb);
1992: }
1.157 msaitoh 1993:
1.158 msaitoh 1994: /* Initialize RX ring indexes */
1995: bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1996: bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1997: bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1.157 msaitoh 1998:
1.158 msaitoh 1999: /*
2000: * Set up RX return ring 0
2001: * Note that the NIC address for RX return rings is 0x00000000.
2002: * The return rings live entirely within the host, so the
2003: * nicaddr field in the RCB isn't used.
2004: */
2005: rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1.172 msaitoh 2006: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1.158 msaitoh 2007: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2008: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2009: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2010: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2011: BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1.157 msaitoh 2012:
1.158 msaitoh 2013: /* Set random backoff seed for TX */
2014: CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2015: CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
2016: CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
2017: CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] +
2018: BGE_TX_BACKOFF_SEED_MASK);
1.157 msaitoh 2019:
1.158 msaitoh 2020: /* Set inter-packet gap */
2021: CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1.51 fvdl 2022:
1.158 msaitoh 2023: /*
2024: * Specify which ring to use for packets that don't match
2025: * any RX rules.
2026: */
2027: CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1.157 msaitoh 2028:
1.158 msaitoh 2029: /*
2030: * Configure number of RX lists. One interrupt distribution
2031: * list, sixteen active lists, one bad frames class.
2032: */
2033: CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1.157 msaitoh 2034:
1.158 msaitoh 2035: /* Inialize RX list placement stats mask. */
2036: CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2037: CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1.157 msaitoh 2038:
1.158 msaitoh 2039: /* Disable host coalescing until we get it set up */
2040: CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1.51 fvdl 2041:
1.158 msaitoh 2042: /* Poll to make sure it's shut down. */
1.172 msaitoh 2043: for (i = 0; i < BGE_TIMEOUT * 2; i++) {
1.158 msaitoh 2044: if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2045: break;
2046: DELAY(10);
2047: }
1.151 cegger 2048:
1.172 msaitoh 2049: if (i == BGE_TIMEOUT * 2) {
1.158 msaitoh 2050: aprint_error_dev(sc->bge_dev,
2051: "host coalescing engine failed to idle\n");
1.170 msaitoh 2052: return ENXIO;
1.158 msaitoh 2053: }
1.51 fvdl 2054:
1.158 msaitoh 2055: /* Set up host coalescing defaults */
2056: CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2057: CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2058: CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2059: CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1.172 msaitoh 2060: if (BGE_IS_5700_FAMILY(sc)) {
1.158 msaitoh 2061: CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2062: CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1.51 fvdl 2063: }
1.158 msaitoh 2064: CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2065: CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1.51 fvdl 2066:
1.158 msaitoh 2067: /* Set up address of statistics block */
1.172 msaitoh 2068: if (BGE_IS_5700_FAMILY(sc)) {
2069: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1.158 msaitoh 2070: CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2071: CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2072: CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2073: CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
1.16 thorpej 2074: }
2075:
1.158 msaitoh 2076: /* Set up address of status block */
1.172 msaitoh 2077: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1.158 msaitoh 2078: CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2079: CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2080: CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2081: sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2082: sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1.16 thorpej 2083:
1.158 msaitoh 2084: /* Turn on host coalescing state machine */
2085: CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1.7 thorpej 2086:
1.158 msaitoh 2087: /* Turn on RX BD completion state machine and enable attentions */
2088: CSR_WRITE_4(sc, BGE_RBDC_MODE,
1.161 msaitoh 2089: BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1.7 thorpej 2090:
1.158 msaitoh 2091: /* Turn on RX list placement state machine */
2092: CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1.51 fvdl 2093:
1.158 msaitoh 2094: /* Turn on RX list selector state machine. */
1.172 msaitoh 2095: if (BGE_IS_5700_FAMILY(sc))
1.158 msaitoh 2096: CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1.51 fvdl 2097:
1.161 msaitoh 2098: val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2099: BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2100: BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2101: BGE_MACMODE_FRMHDR_DMA_ENB;
2102:
2103: if (sc->bge_flags & BGE_PHY_FIBER_TBI)
2104: val |= BGE_PORTMODE_TBI;
2105: else if (sc->bge_flags & BGE_PHY_FIBER_MII)
2106: val |= BGE_PORTMODE_GMII;
2107: else
2108: val |= BGE_PORTMODE_MII;
2109:
1.158 msaitoh 2110: /* Turn on DMA, clear stats */
1.161 msaitoh 2111: CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2112:
1.51 fvdl 2113:
1.158 msaitoh 2114: /* Set misc. local control, enable interrupts on attentions */
2115: sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
1.51 fvdl 2116:
1.158 msaitoh 2117: #ifdef notdef
2118: /* Assert GPIO pins for PHY reset */
2119: BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2120: BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2121: BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2122: BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2123: #endif
1.98 jonathan 2124:
1.158 msaitoh 2125: #if defined(not_quite_yet)
2126: /* Linux driver enables enable gpio pin #1 on 5700s */
2127: if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
2128: sc->bge_local_ctrl_reg |=
2129: (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
2130: }
2131: #endif
2132: CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
1.80 fredb 2133:
1.158 msaitoh 2134: /* Turn on DMA completion state machine */
1.172 msaitoh 2135: if (BGE_IS_5700_FAMILY(sc))
1.158 msaitoh 2136: CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1.149 sborrill 2137:
1.158 msaitoh 2138: /* Turn on write DMA state machine */
2139: {
2140: uint32_t bge_wdma_mode =
2141: BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1.76 cube 2142:
1.158 msaitoh 2143: /* Enable host coalescing bug fix; see Linux tg3.c */
1.172 msaitoh 2144: if (BGE_IS_5755_PLUS(sc))
2145: bge_wdma_mode |= BGE_WDMAMODE_STATUS_TAG_FIX;
1.76 cube 2146:
1.158 msaitoh 2147: CSR_WRITE_4(sc, BGE_WDMA_MODE, bge_wdma_mode);
2148: }
1.76 cube 2149:
1.158 msaitoh 2150: /* Turn on read DMA state machine */
2151: {
2152: uint32_t dma_read_modebits;
1.91 gavan 2153:
1.158 msaitoh 2154: dma_read_modebits =
2155: BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1.98 jonathan 2156:
1.172 msaitoh 2157: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2158: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2159: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2160: dma_read_modebits |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2161: BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2162: BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2163:
2164: if (sc->bge_flags & BGE_PCIE)
1.158 msaitoh 2165: dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST;
1.172 msaitoh 2166: if (sc->bge_flags & BGE_TSO)
2167: dma_read_modebits |= BGE_RDMAMODE_TSO4_ENABLE;
1.158 msaitoh 2168: CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits);
1.172 msaitoh 2169: delay(40);
1.158 msaitoh 2170: }
1.128 tron 2171:
1.158 msaitoh 2172: /* Turn on RX data completion state machine */
2173: CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1.128 tron 2174:
1.158 msaitoh 2175: /* Turn on RX BD initiator state machine */
2176: CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1.133 markd 2177:
1.158 msaitoh 2178: /* Turn on RX data and RX BD initiator state machine */
2179: CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1.133 markd 2180:
1.158 msaitoh 2181: /* Turn on Mbuf cluster free state machine */
1.172 msaitoh 2182: if (BGE_IS_5700_FAMILY(sc))
1.158 msaitoh 2183: CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1.133 markd 2184:
1.158 msaitoh 2185: /* Turn on send BD completion state machine */
2186: CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1.133 markd 2187:
1.158 msaitoh 2188: /* Turn on send data completion state machine */
1.172 msaitoh 2189: val = BGE_SDCMODE_ENABLE;
2190: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2191: val |= BGE_SDCMODE_CDELAY;
2192: CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1.106 jonathan 2193:
1.158 msaitoh 2194: /* Turn on send data initiator state machine */
1.172 msaitoh 2195: if (sc->bge_flags & BGE_TSO) {
1.158 msaitoh 2196: /* XXX: magic value from Linux driver */
2197: CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
2198: } else {
2199: CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2200: }
1.106 jonathan 2201:
1.158 msaitoh 2202: /* Turn on send BD initiator state machine */
2203: CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1.133 markd 2204:
1.158 msaitoh 2205: /* Turn on send BD selector state machine */
2206: CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1.135 taca 2207:
1.158 msaitoh 2208: CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2209: CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1.161 msaitoh 2210: BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1.133 markd 2211:
1.158 msaitoh 2212: /* ack/clear link change events */
1.161 msaitoh 2213: CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2214: BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1.172 msaitoh 2215: BGE_MACSTAT_LINK_CHANGED);
1.158 msaitoh 2216: CSR_WRITE_4(sc, BGE_MI_STS, 0);
1.106 jonathan 2217:
1.158 msaitoh 2218: /* Enable PHY auto polling (for MII/GMII only) */
2219: if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2220: CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2221: } else {
1.161 msaitoh 2222: BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
2223: BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1.158 msaitoh 2224: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
2225: CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2226: BGE_EVTENB_MI_INTERRUPT);
2227: }
1.70 tron 2228:
1.161 msaitoh 2229: /*
2230: * Clear any pending link state attention.
2231: * Otherwise some link state change events may be lost until attention
2232: * is cleared by bge_intr() -> bge_link_upd() sequence.
2233: * It's not necessary on newer BCM chips - perhaps enabling link
2234: * state change attentions implies clearing pending attention.
2235: */
2236: CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2237: BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2238: BGE_MACSTAT_LINK_CHANGED);
2239:
1.158 msaitoh 2240: /* Enable link state change attentions. */
2241: BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1.51 fvdl 2242:
1.170 msaitoh 2243: return 0;
1.158 msaitoh 2244: }
1.7 thorpej 2245:
1.158 msaitoh 2246: static const struct bge_revision *
2247: bge_lookup_rev(uint32_t chipid)
2248: {
2249: const struct bge_revision *br;
1.7 thorpej 2250:
1.158 msaitoh 2251: for (br = bge_revisions; br->br_name != NULL; br++) {
2252: if (br->br_chipid == chipid)
1.170 msaitoh 2253: return br;
1.158 msaitoh 2254: }
1.151 cegger 2255:
1.158 msaitoh 2256: for (br = bge_majorrevs; br->br_name != NULL; br++) {
2257: if (br->br_chipid == BGE_ASICREV(chipid))
1.170 msaitoh 2258: return br;
1.158 msaitoh 2259: }
1.151 cegger 2260:
1.170 msaitoh 2261: return NULL;
1.158 msaitoh 2262: }
1.7 thorpej 2263:
2264: static const struct bge_product *
2265: bge_lookup(const struct pci_attach_args *pa)
2266: {
2267: const struct bge_product *bp;
2268:
2269: for (bp = bge_products; bp->bp_name != NULL; bp++) {
2270: if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
2271: PCI_PRODUCT(pa->pa_id) == bp->bp_product)
1.170 msaitoh 2272: return bp;
1.7 thorpej 2273: }
2274:
1.170 msaitoh 2275: return NULL;
1.7 thorpej 2276: }
2277:
1.104 thorpej 2278: static int
1.116 christos 2279: bge_setpowerstate(struct bge_softc *sc, int powerlevel)
1.25 jonathan 2280: {
2281: #ifdef NOTYET
1.170 msaitoh 2282: uint32_t pm_ctl = 0;
1.25 jonathan 2283:
2284: /* XXX FIXME: make sure indirect accesses enabled? */
2285: pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
2286: pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
2287: pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
2288:
2289: /* clear the PME_assert bit and power state bits, enable PME */
2290: pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
2291: pm_ctl &= ~PCIM_PSTAT_DMASK;
2292: pm_ctl |= (1 << 8);
2293:
2294: if (powerlevel == 0) {
2295: pm_ctl |= PCIM_PSTAT_D0;
2296: pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
2297: pm_ctl, 2);
2298: DELAY(10000);
1.27 jonathan 2299: CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
1.25 jonathan 2300: DELAY(10000);
2301:
2302: #ifdef NOTYET
2303: /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
2304: bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
2305: #endif
2306: DELAY(40); DELAY(40); DELAY(40);
2307: DELAY(10000); /* above not quite adequate on 5700 */
2308: return 0;
2309: }
2310:
2311:
2312: /*
2313: * Entering ACPI power states D1-D3 is achieved by wiggling
2314: * GMII gpio pins. Example code assumes all hardware vendors
2315: * followed Broadom's sample pcb layout. Until we verify that
2316: * for all supported OEM cards, states D1-D3 are unsupported.
2317: */
1.138 joerg 2318: aprint_error_dev(sc->bge_dev,
2319: "power state %d unimplemented; check GPIO pins\n",
2320: powerlevel);
1.25 jonathan 2321: #endif
2322: return EOPNOTSUPP;
2323: }
2324:
2325:
1.1 fvdl 2326: /*
2327: * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2328: * against our list and return its name if we find a match. Note
2329: * that since the Broadcom controller contains VPD support, we
2330: * can get the device name string from the controller itself instead
2331: * of the compiled-in string. This is a little slow, but it guarantees
2332: * we'll always announce the right product name.
2333: */
1.104 thorpej 2334: static int
1.116 christos 2335: bge_probe(device_t parent, cfdata_t match, void *aux)
1.1 fvdl 2336: {
2337: struct pci_attach_args *pa = (struct pci_attach_args *)aux;
2338:
1.7 thorpej 2339: if (bge_lookup(pa) != NULL)
1.170 msaitoh 2340: return 1;
1.1 fvdl 2341:
1.170 msaitoh 2342: return 0;
1.1 fvdl 2343: }
2344:
1.104 thorpej 2345: static void
1.116 christos 2346: bge_attach(device_t parent, device_t self, void *aux)
1.1 fvdl 2347: {
1.138 joerg 2348: struct bge_softc *sc = device_private(self);
1.1 fvdl 2349: struct pci_attach_args *pa = aux;
1.164 msaitoh 2350: prop_dictionary_t dict;
1.7 thorpej 2351: const struct bge_product *bp;
1.16 thorpej 2352: const struct bge_revision *br;
1.143 tron 2353: pci_chipset_tag_t pc;
1.1 fvdl 2354: pci_intr_handle_t ih;
2355: const char *intrstr = NULL;
2356: bus_dma_segment_t seg;
2357: int rseg;
1.170 msaitoh 2358: uint32_t hwcfg = 0;
2359: uint32_t command;
1.1 fvdl 2360: struct ifnet *ifp;
1.170 msaitoh 2361: uint32_t misccfg;
1.126 christos 2362: void * kva;
1.1 fvdl 2363: u_char eaddr[ETHER_ADDR_LEN];
1.172 msaitoh 2364: pcireg_t memtype, subid;
1.1 fvdl 2365: bus_addr_t memaddr;
2366: bus_size_t memsize;
1.170 msaitoh 2367: uint32_t pm_ctl;
1.174 martin 2368: prop_data_t eaddrprop;
2369: bool no_seeprom;
1.87 perry 2370:
1.7 thorpej 2371: bp = bge_lookup(pa);
2372: KASSERT(bp != NULL);
2373:
1.141 jmcneill 2374: sc->sc_pc = pa->pa_pc;
2375: sc->sc_pcitag = pa->pa_tag;
1.138 joerg 2376: sc->bge_dev = self;
1.1 fvdl 2377:
1.172 msaitoh 2378: pc = sc->sc_pc;
2379: subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
2380:
1.30 thorpej 2381: aprint_naive(": Ethernet controller\n");
2382: aprint_normal(": %s\n", bp->bp_name);
1.1 fvdl 2383:
2384: /*
2385: * Map control/status registers.
2386: */
2387: DPRINTFN(5, ("Map control/status regs\n"));
1.141 jmcneill 2388: command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
1.1 fvdl 2389: command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
1.141 jmcneill 2390: pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command);
2391: command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
1.1 fvdl 2392:
2393: if (!(command & PCI_COMMAND_MEM_ENABLE)) {
1.138 joerg 2394: aprint_error_dev(sc->bge_dev,
2395: "failed to enable memory mapping!\n");
1.1 fvdl 2396: return;
2397: }
2398:
2399: DPRINTFN(5, ("pci_mem_find\n"));
1.141 jmcneill 2400: memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0);
1.1 fvdl 2401: switch (memtype) {
1.29 itojun 2402: case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2403: case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1.1 fvdl 2404: if (pci_mapreg_map(pa, BGE_PCI_BAR0,
1.29 itojun 2405: memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
1.1 fvdl 2406: &memaddr, &memsize) == 0)
2407: break;
2408: default:
1.138 joerg 2409: aprint_error_dev(sc->bge_dev, "can't find mem space\n");
1.1 fvdl 2410: return;
2411: }
2412:
2413: DPRINTFN(5, ("pci_intr_map\n"));
2414: if (pci_intr_map(pa, &ih)) {
1.138 joerg 2415: aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n");
1.1 fvdl 2416: return;
2417: }
2418:
2419: DPRINTFN(5, ("pci_intr_string\n"));
2420: intrstr = pci_intr_string(pc, ih);
2421:
2422: DPRINTFN(5, ("pci_intr_establish\n"));
2423: sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
2424:
2425: if (sc->bge_intrhand == NULL) {
1.138 joerg 2426: aprint_error_dev(sc->bge_dev,
2427: "couldn't establish interrupt%s%s\n",
2428: intrstr ? " at " : "", intrstr ? intrstr : "");
1.1 fvdl 2429: return;
2430: }
1.138 joerg 2431: aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
1.1 fvdl 2432:
1.25 jonathan 2433: /*
2434: * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2435: * can clobber the chip's PCI config-space power control registers,
2436: * leaving the card in D3 powersave state.
2437: * We do not have memory-mapped registers in this state,
2438: * so force device into D0 state before starting initialization.
2439: */
1.141 jmcneill 2440: pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
1.25 jonathan 2441: pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2442: pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
1.141 jmcneill 2443: pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
1.25 jonathan 2444: DELAY(1000); /* 27 usec is allegedly sufficent */
2445:
1.76 cube 2446: /*
1.162 msaitoh 2447: * Save ASIC rev.
1.76 cube 2448: */
2449: sc->bge_chipid =
1.172 msaitoh 2450: pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2451: >> BGE_PCIMISCCTL_ASICREV_SHIFT;
2452:
2453: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2454: if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5717 ||
2455: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5718 ||
2456: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5724)
2457: sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2458: BGE_PCI_GEN2_PRODID_ASICREV);
2459: else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57761 ||
2460: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57765 ||
2461: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57781 ||
2462: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57785 ||
2463: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
2464: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795)
2465: sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2466: BGE_PCI_GEN15_PRODID_ASICREV);
2467: else
2468: sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2469: BGE_PCI_PRODID_ASICREV);
2470: }
1.76 cube 2471:
1.141 jmcneill 2472: if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
1.171 msaitoh 2473: NULL, NULL) != 0) {
2474: /* PCIe */
1.157 msaitoh 2475: sc->bge_flags |= BGE_PCIE;
1.171 msaitoh 2476: } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
2477: BGE_PCISTATE_PCI_BUSMODE) == 0) {
2478: /* PCI-X */
1.157 msaitoh 2479: sc->bge_flags |= BGE_PCIX;
1.171 msaitoh 2480: }
1.76 cube 2481:
1.172 msaitoh 2482: /* chipid */
2483: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2484: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 ||
2485: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2486: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2487: sc->bge_flags |= BGE_5700_FAMILY;
2488:
2489: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 ||
2490: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 ||
2491: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
2492: sc->bge_flags |= BGE_5714_FAMILY;
2493:
2494: /* Intentionally exclude BGE_ASICREV_BCM5906 */
2495: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2496: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2497: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2498: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2499: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2500: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
2501: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2502: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2503: sc->bge_flags |= BGE_5755_PLUS;
2504:
2505: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
2506: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2507: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 ||
2508: BGE_IS_5755_PLUS(sc) ||
2509: BGE_IS_5714_FAMILY(sc))
2510: sc->bge_flags |= BGE_5750_PLUS;
2511:
2512: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 ||
2513: BGE_IS_5750_OR_BEYOND(sc))
2514: sc->bge_flags |= BGE_5705_PLUS;
2515:
2516: /*
2517: * When using the BCM5701 in PCI-X mode, data corruption has
2518: * been observed in the first few bytes of some received packets.
2519: * Aligning the packet buffer in memory eliminates the corruption.
2520: * Unfortunately, this misaligns the packet payloads. On platforms
2521: * which do not support unaligned accesses, we will realign the
2522: * payloads by copying the received packets.
2523: */
2524: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2525: sc->bge_flags & BGE_PCIX)
2526: sc->bge_flags |= BGE_RX_ALIGNBUG;
2527:
2528: if (BGE_IS_5700_FAMILY(sc))
2529: sc->bge_flags |= BGE_JUMBO_CAPABLE;
2530:
2531: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2532: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2533: PCI_VENDOR(subid) == PCI_VENDOR_DELL)
2534: sc->bge_flags |= BGE_NO_3LED;
2535:
2536: misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2537: misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2538:
2539: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2540: (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2541: misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2542: sc->bge_flags |= BGE_IS_5788;
2543:
2544: /*
2545: * Some controllers seem to require a special firmware to use
2546: * TSO. But the firmware is not available to FreeBSD and Linux
2547: * claims that the TSO performed by the firmware is slower than
2548: * hardware based TSO. Moreover the firmware based TSO has one
2549: * known bug which can't handle TSO if ethernet header + IP/TCP
2550: * header is greater than 80 bytes. The workaround for the TSO
2551: * bug exist but it seems it's too expensive than not using
2552: * TSO at all. Some hardwares also have the TSO bug so limit
2553: * the TSO to the controllers that are not affected TSO issues
2554: * (e.g. 5755 or higher).
2555: */
2556: if (BGE_IS_5755_PLUS(sc)) {
2557: /*
2558: * BCM5754 and BCM5787 shares the same ASIC id so
2559: * explicit device id check is required.
2560: */
2561: if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) &&
2562: (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M))
2563: sc->bge_flags |= BGE_TSO;
2564: }
2565:
2566: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
2567: (misccfg == 0x4000 || misccfg == 0x8000)) ||
2568: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2569: PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2570: (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
2571: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2572: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2573: (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2574: (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
2575: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
2576: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2577: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
2578: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2579: sc->bge_flags |= BGE_10_100_ONLY;
2580:
2581: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2582: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2583: (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2584: sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2585: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2586: sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED;
2587:
1.162 msaitoh 2588: if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2589: sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2590: sc->bge_flags |= BGE_PHY_CRC_BUG;
2591: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
2592: BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
2593: sc->bge_flags |= BGE_PHY_ADC_BUG;
2594: if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2595: sc->bge_flags |= BGE_PHY_5704_A0_BUG;
2596:
1.172 msaitoh 2597: if (BGE_IS_5705_PLUS(sc) &&
2598: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
2599: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2600: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2601: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 &&
2602: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780) {
1.162 msaitoh 2603: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1.172 msaitoh 2604: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2605: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
1.162 msaitoh 2606: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2607: if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2608: PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2609: sc->bge_flags |= BGE_PHY_JITTER_BUG;
2610: if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2611: sc->bge_flags |= BGE_PHY_ADJUST_TRIM;
2612: } else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
2613: sc->bge_flags |= BGE_PHY_BER_BUG;
2614: }
2615:
1.174 martin 2616: /*
2617: * SEEPROM check.
2618: * First check if firmware knows we do not have SEEPROM.
2619: */
2620: if (prop_dictionary_get_bool(device_properties(self),
2621: "without-seeprom", &no_seeprom) && no_seeprom)
2622: sc->bge_flags |= BGE_NO_EEPROM;
2623:
2624: /* Now check the 'ROM failed' bit on the RX CPU */
2625: else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL)
1.172 msaitoh 2626: sc->bge_flags |= BGE_NO_EEPROM;
2627:
1.1 fvdl 2628: /* Try to reset the chip. */
2629: DPRINTFN(5, ("bge_reset\n"));
2630: bge_reset(sc);
2631:
2632: if (bge_chipinit(sc)) {
1.138 joerg 2633: aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
1.1 fvdl 2634: bge_release_resources(sc);
2635: return;
2636: }
2637:
2638: /*
1.174 martin 2639: * Get station address from the EEPROM (or use firmware values
2640: * if provided via device properties)
1.1 fvdl 2641: */
1.174 martin 2642: eaddrprop = prop_dictionary_get(device_properties(self), "mac-address");
2643:
2644: if (eaddrprop != NULL && prop_data_size(eaddrprop) == ETHER_ADDR_LEN) {
2645: memcpy(eaddr, prop_data_data_nocopy(eaddrprop),
2646: ETHER_ADDR_LEN);
2647: goto got_eaddr;
2648: }
2649:
1.151 cegger 2650: if (bge_get_eaddr(sc, eaddr)) {
2651: aprint_error_dev(sc->bge_dev,
1.170 msaitoh 2652: "failed to read station address\n");
1.1 fvdl 2653: bge_release_resources(sc);
2654: return;
2655: }
2656:
1.174 martin 2657: got_eaddr:
1.51 fvdl 2658: br = bge_lookup_rev(sc->bge_chipid);
2659:
1.16 thorpej 2660: if (br == NULL) {
1.172 msaitoh 2661: aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)",
2662: sc->bge_chipid);
1.16 thorpej 2663: } else {
1.172 msaitoh 2664: aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)",
2665: br->br_name, sc->bge_chipid);
1.16 thorpej 2666: }
1.30 thorpej 2667: aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
1.1 fvdl 2668:
2669: /* Allocate the general information block and ring buffers. */
1.41 fvdl 2670: if (pci_dma64_available(pa))
2671: sc->bge_dmatag = pa->pa_dmat64;
2672: else
2673: sc->bge_dmatag = pa->pa_dmat;
1.1 fvdl 2674: DPRINTFN(5, ("bus_dmamem_alloc\n"));
2675: if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2676: PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1.138 joerg 2677: aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
1.1 fvdl 2678: return;
2679: }
2680: DPRINTFN(5, ("bus_dmamem_map\n"));
2681: if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2682: sizeof(struct bge_ring_data), &kva,
2683: BUS_DMA_NOWAIT)) {
1.138 joerg 2684: aprint_error_dev(sc->bge_dev,
2685: "can't map DMA buffers (%zu bytes)\n",
2686: sizeof(struct bge_ring_data));
1.1 fvdl 2687: bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2688: return;
2689: }
2690: DPRINTFN(5, ("bus_dmamem_create\n"));
2691: if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2692: sizeof(struct bge_ring_data), 0,
2693: BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
1.138 joerg 2694: aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
1.1 fvdl 2695: bus_dmamem_unmap(sc->bge_dmatag, kva,
2696: sizeof(struct bge_ring_data));
2697: bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2698: return;
2699: }
2700: DPRINTFN(5, ("bus_dmamem_load\n"));
2701: if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2702: sizeof(struct bge_ring_data), NULL,
2703: BUS_DMA_NOWAIT)) {
2704: bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2705: bus_dmamem_unmap(sc->bge_dmatag, kva,
2706: sizeof(struct bge_ring_data));
2707: bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2708: return;
2709: }
2710:
2711: DPRINTFN(5, ("bzero\n"));
2712: sc->bge_rdata = (struct bge_ring_data *)kva;
2713:
1.19 mjl 2714: memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
1.1 fvdl 2715:
2716: /* Try to allocate memory for jumbo buffers. */
1.166 msaitoh 2717: if (BGE_IS_JUMBO_CAPABLE(sc)) {
1.44 hannken 2718: if (bge_alloc_jumbo_mem(sc)) {
1.138 joerg 2719: aprint_error_dev(sc->bge_dev,
2720: "jumbo buffer allocation failed\n");
1.44 hannken 2721: } else
2722: sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2723: }
1.1 fvdl 2724:
2725: /* Set default tuneable values. */
2726: sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2727: sc->bge_rx_coal_ticks = 150;
1.25 jonathan 2728: sc->bge_rx_max_coal_bds = 64;
2729: #ifdef ORIG_WPAUL_VALUES
1.1 fvdl 2730: sc->bge_tx_coal_ticks = 150;
2731: sc->bge_tx_max_coal_bds = 128;
1.25 jonathan 2732: #else
2733: sc->bge_tx_coal_ticks = 300;
2734: sc->bge_tx_max_coal_bds = 400;
2735: #endif
1.172 msaitoh 2736: if (BGE_IS_5705_PLUS(sc)) {
1.95 jonathan 2737: sc->bge_tx_coal_ticks = (12 * 5);
1.146 mlelstv 2738: sc->bge_tx_max_coal_bds = (12 * 5);
1.138 joerg 2739: aprint_verbose_dev(sc->bge_dev,
2740: "setting short Tx thresholds\n");
1.95 jonathan 2741: }
1.1 fvdl 2742:
1.172 msaitoh 2743: if (BGE_IS_5705_PLUS(sc))
2744: sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2745: else
2746: sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2747:
1.1 fvdl 2748: /* Set up ifnet structure */
2749: ifp = &sc->ethercom.ec_if;
2750: ifp->if_softc = sc;
2751: ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2752: ifp->if_ioctl = bge_ioctl;
1.141 jmcneill 2753: ifp->if_stop = bge_stop;
1.1 fvdl 2754: ifp->if_start = bge_start;
2755: ifp->if_init = bge_init;
2756: ifp->if_watchdog = bge_watchdog;
1.42 ragge 2757: IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
1.1 fvdl 2758: IFQ_SET_READY(&ifp->if_snd);
1.115 tsutsui 2759: DPRINTFN(5, ("strcpy if_xname\n"));
1.138 joerg 2760: strcpy(ifp->if_xname, device_xname(sc->bge_dev));
1.1 fvdl 2761:
1.157 msaitoh 2762: if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
1.18 thorpej 2763: sc->ethercom.ec_if.if_capabilities |=
1.172 msaitoh 2764: IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
2765: #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */
2766: sc->ethercom.ec_if.if_capabilities |=
1.88 yamt 2767: IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2768: IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
1.172 msaitoh 2769: #endif
1.87 perry 2770: sc->ethercom.ec_capabilities |=
1.1 fvdl 2771: ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
2772:
1.172 msaitoh 2773: if (sc->bge_flags & BGE_TSO)
1.95 jonathan 2774: sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
2775:
1.1 fvdl 2776: /*
2777: * Do MII setup.
2778: */
2779: DPRINTFN(5, ("mii setup\n"));
2780: sc->bge_mii.mii_ifp = ifp;
2781: sc->bge_mii.mii_readreg = bge_miibus_readreg;
2782: sc->bge_mii.mii_writereg = bge_miibus_writereg;
2783: sc->bge_mii.mii_statchg = bge_miibus_statchg;
2784:
2785: /*
2786: * Figure out what sort of media we have by checking the
1.35 jonathan 2787: * hardware config word in the first 32k of NIC internal memory,
2788: * or fall back to the config word in the EEPROM. Note: on some BCM5700
1.1 fvdl 2789: * cards, this value appears to be unset. If that's the
2790: * case, we have to rely on identifying the NIC by its PCI
2791: * subsystem ID, as we do below for the SysKonnect SK-9D41.
2792: */
1.35 jonathan 2793: if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2794: hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1.175 ! martin 2795: } else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
1.126 christos 2796: bge_read_eeprom(sc, (void *)&hwcfg,
1.1 fvdl 2797: BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1.35 jonathan 2798: hwcfg = be32toh(hwcfg);
2799: }
1.1 fvdl 2800: /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1.161 msaitoh 2801: if (PCI_PRODUCT(pa->pa_id) == SK_SUBSYSID_9D41 ||
2802: (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2803: if (BGE_IS_5714_FAMILY(sc))
2804: sc->bge_flags |= BGE_PHY_FIBER_MII;
2805: else
2806: sc->bge_flags |= BGE_PHY_FIBER_TBI;
2807: }
1.1 fvdl 2808:
1.167 msaitoh 2809: /* set phyflags before mii_attach() */
2810: dict = device_properties(self);
2811: prop_dictionary_set_uint32(dict, "phyflags", sc->bge_flags);
2812:
1.157 msaitoh 2813: if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
1.1 fvdl 2814: ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2815: bge_ifmedia_sts);
2816: ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2817: ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2818: 0, NULL);
2819: ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2820: ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1.155 he 2821: /* Pretend the user requested this setting */
1.162 msaitoh 2822: sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1.1 fvdl 2823: } else {
2824: /*
2825: * Do transceiver setup.
2826: */
2827: ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2828: bge_ifmedia_sts);
1.138 joerg 2829: mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff,
1.69 thorpej 2830: MII_PHY_ANY, MII_OFFSET_ANY,
2831: MIIF_FORCEANEG|MIIF_DOPAUSE);
1.87 perry 2832:
1.142 dyoung 2833: if (LIST_EMPTY(&sc->bge_mii.mii_phys)) {
1.138 joerg 2834: aprint_error_dev(sc->bge_dev, "no PHY found!\n");
1.1 fvdl 2835: ifmedia_add(&sc->bge_mii.mii_media,
2836: IFM_ETHER|IFM_MANUAL, 0, NULL);
2837: ifmedia_set(&sc->bge_mii.mii_media,
2838: IFM_ETHER|IFM_MANUAL);
2839: } else
2840: ifmedia_set(&sc->bge_mii.mii_media,
2841: IFM_ETHER|IFM_AUTO);
2842: }
2843:
2844: /*
2845: * Call MI attach routine.
2846: */
2847: DPRINTFN(5, ("if_attach\n"));
2848: if_attach(ifp);
2849: DPRINTFN(5, ("ether_ifattach\n"));
2850: ether_ifattach(ifp, eaddr);
1.148 mlelstv 2851: #if NRND > 0
2852: rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
2853: RND_TYPE_NET, 0);
2854: #endif
1.72 thorpej 2855: #ifdef BGE_EVENT_COUNTERS
2856: /*
2857: * Attach event counters.
2858: */
2859: evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
1.138 joerg 2860: NULL, device_xname(sc->bge_dev), "intr");
1.72 thorpej 2861: evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
1.138 joerg 2862: NULL, device_xname(sc->bge_dev), "tx_xoff");
1.72 thorpej 2863: evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
1.138 joerg 2864: NULL, device_xname(sc->bge_dev), "tx_xon");
1.72 thorpej 2865: evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
1.138 joerg 2866: NULL, device_xname(sc->bge_dev), "rx_xoff");
1.72 thorpej 2867: evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
1.138 joerg 2868: NULL, device_xname(sc->bge_dev), "rx_xon");
1.72 thorpej 2869: evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
1.138 joerg 2870: NULL, device_xname(sc->bge_dev), "rx_macctl");
1.72 thorpej 2871: evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
1.138 joerg 2872: NULL, device_xname(sc->bge_dev), "xoffentered");
1.72 thorpej 2873: #endif /* BGE_EVENT_COUNTERS */
1.1 fvdl 2874: DPRINTFN(5, ("callout_init\n"));
1.132 ad 2875: callout_init(&sc->bge_timeout, 0);
1.82 jmcneill 2876:
1.168 tsutsui 2877: if (pmf_device_register(self, NULL, NULL))
2878: pmf_class_network_register(self, ifp);
2879: else
1.141 jmcneill 2880: aprint_error_dev(self, "couldn't establish power handler\n");
1.172 msaitoh 2881:
2882: #ifdef BGE_DEBUG
2883: bge_debug_info(sc);
2884: #endif
1.1 fvdl 2885: }
2886:
1.104 thorpej 2887: static void
2888: bge_release_resources(struct bge_softc *sc)
1.1 fvdl 2889: {
2890: if (sc->bge_vpd_prodname != NULL)
2891: free(sc->bge_vpd_prodname, M_DEVBUF);
2892:
2893: if (sc->bge_vpd_readonly != NULL)
2894: free(sc->bge_vpd_readonly, M_DEVBUF);
2895: }
2896:
/*
 * bge_reset:
 *
 *	Perform a full chip reset.  Saves critical PCI configuration
 *	state, issues the core-clock reset through whichever register
 *	access method this chip family requires, waits for the on-chip
 *	firmware/boot code to signal completion, then restores the PCI
 *	state and re-applies settings (byte swapping, SERDES/PCIe
 *	workarounds) that the reset clobbered.
 */
static void
bge_reset(struct bge_softc *sc)
{
	uint32_t cachesize, command, pcistate, new_pcistate;
	int i, val;
	void (*write_op)(struct bge_softc *, int, int);

	/*
	 * Pick the register write method used to trigger the reset:
	 * 5750-and-newer parts (except 5714 family and 5906) use direct
	 * or indirect memory writes depending on PCIe vs. PCI-X; older
	 * parts use indirect register writes.
	 */
	if (BGE_IS_5750_OR_BEYOND(sc) && !BGE_IS_5714_FAMILY(sc) &&
	    (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
		if (sc->bge_flags & BGE_PCIE) {
			write_op = bge_writemem_direct;
		} else {
			write_op = bge_writemem_ind;
		}
	} else {
		write_op = bge_writereg_ind;
	}


	/* Save some important PCI state. */
	cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
	command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
	pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE);

	/* Enable indirect access and mask interrupts while we reset. */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);

	/* Disable fastboot on controllers that support it. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
	    BGE_IS_5755_PLUS(sc))
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);

	val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1);
	/*
	 * XXX: from FreeBSD/Linux; no documentation
	 */
	if (sc->bge_flags & BGE_PCIE) {
		if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60)
			/* PCI Express 1.0 system */
			CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20);
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/*
			 * Prevent PCI Express link training
			 * during global reset.
			 */
			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
			val |= (1<<29);
		}
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val |= BGE_MISCCFG_KEEP_GPHY_POWER;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, val);

	/*
	 * The 5906 has an on-chip VCPU: flag a driver reset and let the
	 * CPU run so it can re-initialize itself after the reset.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		i = CSR_READ_4(sc, BGE_VCPU_STATUS);
		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
		    i | BGE_VCPU_STATUS_DRV_RESET);
		i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
	}

	DELAY(1000);

	/*
	 * XXX: from FreeBSD/Linux; no documentation
	 */
	if (sc->bge_flags & BGE_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			pcireg_t reg;

			DELAY(500000);
			/* XXX: Magic Numbers */
			reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
			    BGE_PCI_UNKNOWN0);
			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
			    BGE_PCI_UNKNOWN0,
			    reg | (1 << 15));
		}
		/*
		 * XXX: Magic Numbers.
		 * Sets maximal PCI-e payload and clears any PCI-e errors.
		 * Should be replaced with references to PCI config-space
		 * capability block for PCI-Express.
		 */
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    BGE_PCI_CONF_DEV_CTRL, 0xf5000);

	}

	/* Reset some of the PCI state that got zapped by reset */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);
	write_op(sc, BGE_MISC_CFG, (65 << 1));

	/*
	 * Enable memory arbiter.  On the 5714 family, preserve the mode
	 * bits already set in the register rather than clobbering them.
	 */
	{
		uint32_t marbmode = 0;
		if (BGE_IS_5714_FAMILY(sc)) {
			marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
		}
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
	}

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		/* 5906: wait for the VCPU to report initialization done. */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i == BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev, "reset timed out\n");
			return;
		}
	} else {
		/*
		 * Poll the value location we just wrote until
		 * we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization
		 * is complete.
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
			if (val == ~BGE_MAGIC_NUMBER)
				break;
			DELAY(10);
		}

		/*
		 * Only treat the handshake timeout as an error when the
		 * device has firmware (an EEPROM) to perform it.
		 */
		if (i >= BGE_TIMEOUT && (!(sc->bge_flags & BGE_NO_EEPROM))) {
			aprint_error_dev(sc->bge_dev,
			    "firmware handshake timed out, val = %x\n", val);
			/*
			 * XXX: occasionally fired on bcm5721, but without
			 * apparent harm. For now, keep going if we timeout
			 * against PCI-E devices.
			 */
			if ((sc->bge_flags & BGE_PCIE) == 0)
				return;
		}
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    BGE_PCI_PCISTATE);
		if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
		    (pcistate & ~BGE_PCISTATE_RESERVED))
			break;
		DELAY(10);
	}
	if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
	    (pcistate & ~BGE_PCISTATE_RESERVED)) {
		aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n");
	}

#if 0
	/* Enable memory arbiter. */
	/* XXX why do this twice? */
	{
		uint32_t marbmode = 0;
		if (BGE_IS_5714_FAMILY(sc)) {
			marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
		}
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
	}
#endif

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	if (sc->bge_flags & BGE_PCIE &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765) {
		uint32_t v;

		/* Enable PCI Express bug fix */
		/* XXX 0x7c00 / bit 25 are undocumented magic values. */
		v = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
	}
	DELAY(10000);
}
3121:
3122: /*
3123: * Frame reception handling. This is called if there's a frame
3124: * on the receive return list.
3125: *
3126: * Note: we have to be able to handle two possibilities here:
 3127:  * 1) the frame is from the jumbo receive ring
3128: * 2) the frame is from the standard receive ring
3129: */
3130:
/*
 * bge_rxeof:
 *
 *	Drain the RX return ring: for each completed descriptor, hand
 *	the received mbuf up the stack (or count an error), replenish
 *	the corresponding standard or jumbo receive ring slot, and
 *	finally tell the chip how far we have consumed via the mailbox
 *	registers.
 */
static void
bge_rxeof(struct bge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t rx_prod, rx_cons;
	int stdcnt = 0, jumbocnt = 0;	/* slots replenished per ring */
	bus_dmamap_t dmamap;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync;

	rx_cons = sc->bge_rx_saved_considx;
	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;

	/* Nothing to do */
	if (rx_cons == rx_prod)
		return;

	ifp = &sc->ethercom.ec_if;

	/* Make the device-written status block visible to the CPU. */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
	tosync = rx_prod - rx_cons;

#if NRND > 0
	if (tosync != 0 && RND_ENABLED(&sc->rnd_source))
		rnd_add_uint32(&sc->rnd_source, tosync);
#endif

	toff = offset + (rx_cons * sizeof (struct bge_rx_bd));

	/*
	 * Negative tosync means the producer index wrapped past the end
	 * of the ring: sync the tail segment (consumer to ring end)
	 * here, then fall through to sync the head segment below.
	 */
	if (tosync < 0) {
		tlen = (sc->bge_return_ring_cnt - rx_cons) *
		    sizeof (struct bge_rx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTREAD);

	while (rx_cons != rx_prod) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;

		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];

		rxidx = cur_rx->bge_idx;
		BGE_INC(rx_cons, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			/* Frame came from the jumbo receive ring. */
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			bus_dmamap_sync(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map,
			    mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
			    BGE_JLEN, BUS_DMASYNC_POSTREAD);
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				/* Recycle the old mbuf into the ring. */
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
			    NULL)== ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			/* Frame came from the standard receive ring. */
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];

			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
			sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmamap);
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				/* Recycle the old mbuf into the ring. */
				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL, dmamap) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
		 * the Rx buffer has the layer-2 header unaligned.
		 * If our CPU requires alignment, re-align by copying.
		 */
		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
			memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif

		/* Strip the trailing Ethernet CRC from the frame length. */
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m);

		/* Hardware IP checksum: a good checksum reads as 0xffff. */
		m->m_pkthdr.csum_flags = M_CSUM_IPv4;

		if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		/*
		 * Rx transport checksum-offload may also
		 * have bugs with packets which, when transmitted,
		 * were `runts' requiring padding.
		 */
		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
		    (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
		     m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
			m->m_pkthdr.csum_data =
			    cur_rx->bge_tcp_udp_csum;
			m->m_pkthdr.csum_flags |=
			    (M_CSUM_TCPv4|M_CSUM_UDPv4|
				M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR);
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue);
		}

		(*ifp->if_input)(ifp, m);
	}

	/* Tell the chip how far we consumed and what we replenished. */
	sc->bge_rx_saved_considx = rx_cons;
	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}
3292:
/*
 * bge_txeof:
 *
 *	Reclaim completed transmit descriptors: walk the TX ring from
 *	our saved consumer index up to the hardware's consumer index,
 *	freeing mbufs and returning their DMA maps to the free pool,
 *	then clear IFF_OACTIVE if any descriptor was retired so that
 *	bge_start() may queue more frames.
 */
static void
bge_txeof(struct bge_softc *sc)
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;
	struct txdmamap_pool_entry *dma;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync;
	struct mbuf *m;

	ifp = &sc->ethercom.ec_if;

	/* Make the device-written status block visible to the CPU. */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	offset = offsetof(struct bge_ring_data, bge_tx_ring);
	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
	    sc->bge_tx_saved_considx;

#if NRND > 0
	if (tosync != 0 && RND_ENABLED(&sc->rnd_source))
		rnd_add_uint32(&sc->rnd_source, tosync);
#endif

	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));

	/*
	 * Negative tosync means the hardware consumer index wrapped
	 * past the end of the ring: sync the tail segment here, then
	 * fall through to sync the head segment below.
	 */
	if (tosync < 0) {
		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
		    sizeof (struct bge_tx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof (struct bge_tx_bd),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
		uint32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
		/* Only the final descriptor of a frame counts a packet. */
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		m = sc->bge_cdata.bge_tx_chain[idx];
		if (m != NULL) {
			/* Release the mbuf and recycle its DMA map. */
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
			dma = sc->txdma[idx];
			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
			sc->txdma[idx] = NULL;

			m_freem(m);
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		/* Progress was made; reset the watchdog countdown. */
		ifp->if_timer = 0;
	}

	/* Retired at least one descriptor: allow bge_start() to run. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}
3366:
static int
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t statusword;

	sc = xsc;
	ifp = &sc->ethercom.ec_if;

	/* It is possible for the interrupt to arrive before
	 * the status block is updated prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */

	/* read status word from status block */
	statusword = sc->bge_rdata->bge_status_block.bge_status;

	/*
	 * Claim the interrupt if either the status block says it was
	 * updated, or the PCI state register says the interrupt line
	 * is (still) asserted by this device.
	 */
	if ((statusword & BGE_STATFLAG_UPDATED) ||
	    (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
		/* Ack interrupt and stop others from occurring. */
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

		BGE_EVCNT_INCR(sc->bge_ev_intr);

		/* clear status word */
		sc->bge_rdata->bge_status_block.bge_status = 0;

		/*
		 * BCM5700 cannot report link changes through the status
		 * block, so always check; otherwise only when a link
		 * state change or pending link event is flagged.
		 */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
		    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
			bge_link_upd(sc);

		if (ifp->if_flags & IFF_RUNNING) {
			/* Check RX return ring producer/consumer */
			bge_rxeof(sc);

			/* Check TX ring producer/consumer */
			bge_txeof(sc);
		}

		/*
		 * Apply any deferred RX interrupt-coalescing change here,
		 * where we know the chip is not mid-update.
		 */
		if (sc->bge_pending_rxintr_change) {
			uint32_t rx_ticks = sc->bge_rx_coal_ticks;
			uint32_t rx_bds = sc->bge_rx_max_coal_bds;
			uint32_t junk;

			CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
			DELAY(10);
			/*
			 * Read-back presumably flushes the posted write to
			 * the host-coalescing block; the value is
			 * intentionally discarded. TODO confirm against the
			 * chip manual.
			 */
			junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);

			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
			DELAY(10);
			junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);

			sc->bge_pending_rxintr_change = 0;
		}
		bge_handle_events(sc);

		/* Re-enable interrupts. */
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

		/* Kick the transmitter if work is queued. */
		if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
			bge_start(ifp);

		return 1;
	} else
		return 0;
}
3436:
1.104 thorpej 3437: static void
3438: bge_tick(void *xsc)
1.1 fvdl 3439: {
3440: struct bge_softc *sc = xsc;
3441: struct mii_data *mii = &sc->bge_mii;
3442: int s;
3443:
3444: s = splnet();
3445:
1.172 msaitoh 3446: if (BGE_IS_5705_PLUS(sc))
3447: bge_stats_update_regs(sc);
3448: else
3449: bge_stats_update(sc);
1.1 fvdl 3450:
1.157 msaitoh 3451: if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
1.161 msaitoh 3452: /*
3453: * Since in TBI mode auto-polling can't be used we should poll
3454: * link status manually. Here we register pending link event
3455: * and trigger interrupt.
3456: */
3457: BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3458: BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3459: } else {
3460: /*
3461: * Do not touch PHY if we have link up. This could break
3462: * IPMI/ASF mode or produce extra input errors.
3463: * (extra input errors was reported for bcm5701 & bcm5704).
3464: */
3465: if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3466: mii_tick(mii);
3467: }
3468:
3469: callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
1.1 fvdl 3470:
3471: splx(s);
3472: }
3473:
1.104 thorpej 3474: static void
1.172 msaitoh 3475: bge_stats_update_regs(struct bge_softc *sc)
3476: {
3477: struct ifnet *ifp = &sc->ethercom.ec_if;
3478:
3479: ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3480: offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3481:
3482: ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3483: ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3484: ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3485: }
3486:
/*
 * Statistics harvest for pre-5705 chips, which keep their counters in
 * a statistics block in NIC memory reached through the PCI memory
 * window (see bge_stats_update_regs() for the register-based variant).
 */
static void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

/*
 * Fetch the low 32 bits of a 64-bit counter from the NIC-memory
 * statistics block.
 */
#define READ_STAT(sc, stats, stat) \
	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	/*
	 * The hardware counters are running totals, so the
	 * "+= total - if_collisions" form replaces if_collisions with
	 * the current hardware sum instead of double-counting.
	 */
	ifp->if_collisions +=
	  (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
	   READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
	   READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
	   READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
	  ifp->if_collisions;

	/*
	 * Flow-control (pause) frame event counters. NOTE(review):
	 * BGE_EVCNT_UPD is fed the hardware running totals here —
	 * presumably it sets rather than increments the evcnt; confirm
	 * against the macro's definition in the driver header.
	 */
	BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
	    READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
	    READ_STAT(sc, stats, outXonSent.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
	    READ_STAT(sc, stats,
	    xoffPauseFramesReceived.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
	    READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
	    READ_STAT(sc, stats,
	    macControlFramesReceived.bge_addr_lo));
	BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
	    READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));

#undef READ_STAT

#ifdef notdef
	ifp->if_collisions +=
	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	   ifp->if_collisions;
#endif
}
3529:
1.46 jonathan 3530: /*
3531: * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3532: * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3533: * but when such padded frames employ the bge IP/TCP checksum offload,
3534: * the hardware checksum assist gives incorrect results (possibly
3535: * from incorporating its own padding into the UDP/TCP checksum; who knows).
3536: * If we pad such runts with zeros, the onboard checksum comes out correct.
3537: */
1.102 perry 3538: static inline int
1.46 jonathan 3539: bge_cksum_pad(struct mbuf *pkt)
3540: {
3541: struct mbuf *last = NULL;
3542: int padlen;
3543:
3544: padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3545:
3546: /* if there's only the packet-header and we can pad there, use it. */
3547: if (pkt->m_pkthdr.len == pkt->m_len &&
1.113 tsutsui 3548: M_TRAILINGSPACE(pkt) >= padlen) {
1.46 jonathan 3549: last = pkt;
3550: } else {
3551: /*
3552: * Walk packet chain to find last mbuf. We will either
1.87 perry 3553: * pad there, or append a new mbuf and pad it
1.46 jonathan 3554: * (thus perhaps avoiding the bcm5700 dma-min bug).
3555: */
3556: for (last = pkt; last->m_next != NULL; last = last->m_next) {
1.114 tsutsui 3557: continue; /* do nothing */
1.46 jonathan 3558: }
3559:
3560: /* `last' now points to last in chain. */
1.114 tsutsui 3561: if (M_TRAILINGSPACE(last) < padlen) {
1.46 jonathan 3562: /* Allocate new empty mbuf, pad it. Compact later. */
3563: struct mbuf *n;
3564: MGET(n, M_DONTWAIT, MT_DATA);
1.129 joerg 3565: if (n == NULL)
3566: return ENOBUFS;
1.46 jonathan 3567: n->m_len = 0;
3568: last->m_next = n;
3569: last = n;
3570: }
3571: }
3572:
1.114 tsutsui 3573: KDASSERT(!M_READONLY(last));
3574: KDASSERT(M_TRAILINGSPACE(last) >= padlen);
3575:
1.46 jonathan 3576: /* Now zero the pad area, to avoid the bge cksum-assist bug */
1.126 christos 3577: memset(mtod(last, char *) + last->m_len, 0, padlen);
1.46 jonathan 3578: last->m_len += padlen;
3579: pkt->m_pkthdr.len += padlen;
3580: return 0;
3581: }
1.45 jonathan 3582:
3583: /*
3584: * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3585: */
static inline int
bge_compact_dma_runt(struct mbuf *pkt)
{
	struct mbuf	*m, *prev;
	int 		totlen, prevlen;

	/*
	 * NOTE(review): totlen and prevlen are computed below but never
	 * consulted; they appear to be leftovers from an earlier version.
	 */
	prev = NULL;
	totlen = 0;
	prevlen = -1;

	for (m = pkt; m != NULL; prev = m,m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen ;

		totlen += mlen;
		if (mlen == 0) {
			continue;
		}
		if (mlen >= 8)
			continue;

		/* If we get here, mbuf data is too small for DMA engine.
		 * Try to fix by shuffling data to prev or next in chain.
		 * If that fails, do a compacting deep-copy of the whole chain.
		 */

		/* Internal frag. If fits in prev, copy it there. */
		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
			memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
			prev->m_len += mlen;
			m->m_len = 0;
			/* XXX stitch chain */
			prev->m_next = m_free(m);
			m = prev;
			continue;
		}
		else if (m->m_next != NULL &&
			     M_TRAILINGSPACE(m) >= shortfall &&
			     m->m_next->m_len >= (8 + shortfall)) {
		    /* m is writable and have enough data in next, pull up. */

			memcpy(m->m_data + m->m_len, m->m_next->m_data,
			    shortfall);
			m->m_len += shortfall;
			m->m_next->m_len -= shortfall;
			m->m_next->m_data += shortfall;
		}
		/*
		 * NOTE(review): the "|| 1" makes this branch the
		 * unconditional fallback whenever the pull-up case above
		 * did not apply, not only for a runt at chain end.
		 */
		else if (m->m_next == NULL || 1) {
		    /* Got a runt at the very end of the packet.
		     * borrow data from the tail of the preceding mbuf and
		     * update its length in-place. (The original data is still
		     * valid, so we can do this even if prev is not writable.)
		     */

			/* if we'd make prev a runt, just move all of its data. */
			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);

			if ((prev->m_len - shortfall) < 8)
				shortfall = prev->m_len;

#ifdef notyet	/* just do the safe slow thing for now */
			/*
			 * NOTE(review): this disabled branch contains
			 * typos ("shorfall", "m->dat") and would not
			 * compile if enabled.
			 */
			if (!M_READONLY(m)) {
				if (M_LEADINGSPACE(m) < shorfall) {
					void *m_dat;
					m_dat = (m->m_flags & M_PKTHDR) ?
					  m->m_pktdat : m->dat;
					memmove(m_dat, mtod(m, void*), m->m_len);
					m->m_data = m_dat;
				}
			} else
#endif	/* just do the safe slow thing */
			{
				struct mbuf * n = NULL;
				int newprevlen = prev->m_len - shortfall;

				MGET(n, M_NOWAIT, MT_DATA);
				if (n == NULL)
				   return ENOBUFS;
				KASSERT(m->m_len + shortfall < MLEN
					/*,
					  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);

				/* first copy the data we're stealing from prev */
				memcpy(n->m_data, prev->m_data + newprevlen,
				    shortfall);

				/* update prev->m_len accordingly */
				prev->m_len -= shortfall;

				/* copy data from runt m */
				memcpy(n->m_data + shortfall, m->m_data,
				    m->m_len);

				/* n holds what we stole from prev, plus m */
				n->m_len = shortfall + m->m_len;

				/* stitch n into chain and free m */
				n->m_next = m->m_next;
				prev->m_next = n;
				/* KASSERT(m->m_next == NULL); */
				m->m_next = NULL;
				m_free(m);
				m = n;	/* for continuing loop */
			}
		}
		prevlen = m->m_len;
	}
	return 0;
}
3696:
1.1 fvdl 3697: /*
3698: * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3699: * pointers to descriptors.
3700: */
/*
 * sc      - softc of the transmitting interface
 * m_head  - packet to transmit (consumed on success, caller keeps it
 *           on failure)
 * txidx   - in/out: next free tx-ring slot; advanced past the
 *           descriptors used on success
 * Returns 0 on success or ENOBUFS if descriptors/DMA resources are
 * exhausted or the packet cannot be handled.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
	struct bge_tx_bd	*f = NULL;
	uint32_t		frag, cur;
	uint16_t		csum_flags = 0;
	uint16_t		txbd_tso_flags = 0;
	struct txdmamap_pool_entry *dma;
	bus_dmamap_t dmamap;
	int			i = 0;
	struct m_tag		*mtag;
	int			use_tso, maxsegsize, error;

	cur = frag = *txidx;

	/* Translate mbuf checksum-offload requests into descriptor flags. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
	}

	/*
	 * If we were asked to do an outboard checksum, and the NIC
	 * has the bug where it sometimes adds in the Ethernet padding,
	 * explicitly pad with zeros so the cksum will be correct either way.
	 * (For now, do this for all chip versions, until newer
	 * are confirmed to not require the workaround.)
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
#ifdef notyet
	    (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
#endif
	    m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
		goto check_dma_bug;

	if (bge_cksum_pad(m_head) != 0)
		return ENOBUFS;

 check_dma_bug:
	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
		goto doit;

	/*
	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
	 * less than eight bytes.  If we encounter a teeny mbuf
	 * at the end of a chain, we can pad.  Otherwise, copy.
	 */
	if (bge_compact_dma_runt(m_head) != 0)
		return ENOBUFS;

 doit:
	/* Grab a pre-allocated DMA map from the free list. */
	dma = SLIST_FIRST(&sc->txdma_list);
	if (dma == NULL)
		return ENOBUFS;
	dmamap = dma->dmamap;

	/*
	 * Set up any necessary TSO state before we start packing...
	 */
	use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
	if (!use_tso) {
		maxsegsize = 0;
	} else {	/* TSO setup */
		unsigned  mss;
		struct ether_header *eh;
		unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
		struct mbuf * m0 = m_head;
		struct ip *ip;
		struct tcphdr *th;
		int iphl, hlen;

		/*
		 * XXX It would be nice if the mbuf pkthdr had offset
		 * fields for the protocol headers.
		 */

		/* Find where the IP header starts (plain or VLAN frame). */
		eh = mtod(m0, struct ether_header *);
		switch (htons(eh->ether_type)) {
		case ETHERTYPE_IP:
			offset = ETHER_HDR_LEN;
			break;

		case ETHERTYPE_VLAN:
			offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			break;

		default:
			/*
			 * Don't support this protocol or encapsulation.
			 */
			return ENOBUFS;
		}

		/*
		 * TCP/IP headers are in the first mbuf; we can do
		 * this the easy way.
		 */
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
		hlen = iphl + offset;
		if (__predict_false(m0->m_len <
				    (hlen + sizeof(struct tcphdr)))) {

			aprint_debug_dev(sc->bge_dev,
			    "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd,"
			    "not handled yet\n",
			     m0->m_len, hlen+ sizeof(struct tcphdr));
#ifdef NOTYET
			/*
			 * XXX jonathan@NetBSD.org: untested.
			 * how to force  this branch to be taken?
			 */
			BGE_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, offset, sizeof(ip), &ip);
			m_copydata(m0, hlen, sizeof(th), &th);

			ip.ip_len = 0;

			m_copyback(m0, hlen + offsetof(struct ip, ip_len),
			    sizeof(ip.ip_len), &ip.ip_len);

			th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
			    ip.ip_dst.s_addr, htons(IPPROTO_TCP));

			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			hlen += th.th_off << 2;
			iptcp_opt_words	= hlen;
#else
			/*
			 * if_wm "hard" case not yet supported, can we not
			 * mandate it out of existence?
			 */
			(void) ip; (void)th; (void) ip_tcp_hlen;

			return ENOBUFS;
#endif
		} else {
			/* Headers are contiguous: point straight at them. */
			ip = (struct ip *) (mtod(m0, char *) + offset);
			th = (struct tcphdr *) (mtod(m0, char *) + hlen);
			ip_tcp_hlen = iphl +  (th->th_off << 2);

			/* Total IP/TCP options, in 32-bit words */
			iptcp_opt_words = (ip_tcp_hlen
			    - sizeof(struct tcphdr)
			    - sizeof(struct ip)) >> 2;
		}
		if (BGE_IS_5750_OR_BEYOND(sc)) {
			/* 5750+ hardware computes the TCP pseudo-header sum. */
			th->th_sum = 0;
			csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM);
		} else {
			/*
			 * XXX jonathan@NetBSD.org: 5705 untested.
			 * Requires TSO firmware patch for 5701/5703/5704.
			 */
			th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}

		mss = m_head->m_pkthdr.segsz;
		txbd_tso_flags |=
		    BGE_TXBDFLAG_CPU_PRE_DMA |
		    BGE_TXBDFLAG_CPU_POST_DMA;

		/*
		 * Our NIC TSO-assist assumes TSO has standard, optionless
		 * IPv4 and TCP headers, which total 40 bytes. By default,
		 * the NIC copies 40 bytes of IP/TCP header from the
		 * supplied header into the IP/TCP header portion of
		 * each post-TSO-segment. If the supplied packet has IP or
		 * TCP options, we need to tell the NIC to copy those extra
		 * bytes into each post-TSO header, in addition to the normal
		 * 40-byte IP/TCP header (and to leave space accordingly).
		 * Unfortunately, the driver encoding of option length
		 * varies across different ASIC families.
		 */
		tcp_seg_flags = 0;
		if (iptcp_opt_words) {
			if (BGE_IS_5705_PLUS(sc)) {
				tcp_seg_flags =
					iptcp_opt_words << 11;
			} else {
				txbd_tso_flags |=
					iptcp_opt_words << 12;
			}
		}
		maxsegsize = mss | tcp_seg_flags;
		ip->ip_len = htons(mss + ip_tcp_hlen);

	}	/* TSO setup */

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;
	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
		BGE_TSO_PRINTF(("%s: "
		    " dmamap_load_mbuf too close to ring wrap\n",
		    device_xname(sc->bge_dev)));
		goto fail_unload;
	}

	/* Look up any VLAN tag attached to the packet. */
	mtag = sc->ethercom.ec_nvlans ?
	    m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;


	/* Iterate over dmap-map fragments. */
	for (i = 0; i < dmamap->dm_nsegs; i++) {
		f = &sc->bge_rdata->bge_tx_ring[frag];
		/* Stop if this slot is still owned by an in-flight packet. */
		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
			break;

		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
		f->bge_len = dmamap->dm_segs[i].ds_len;

		/*
		 * For 5751 and follow-ons, for TSO we must turn
		 * off checksum-assist flag in the tx-descr, and
		 * supply the ASIC-revision-specific encoding
		 * of TSO flags and segsize.
		 */
		if (use_tso) {
			if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) {
				f->bge_rsvd = maxsegsize;
				f->bge_flags = csum_flags | txbd_tso_flags;
			} else {
				f->bge_rsvd = 0;
				f->bge_flags =
				  (csum_flags | txbd_tso_flags) & 0x0fff;
			}
		} else {
			f->bge_rsvd = 0;
			f->bge_flags = csum_flags;
		}

		if (mtag != NULL) {
			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
			f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
		} else {
			f->bge_vlan_tag = 0;
		}
		cur = frag;
		BGE_INC(frag, BGE_TX_RING_CNT);
	}

	/* The loop broke early: ring is full. */
	if (i < dmamap->dm_nsegs) {
		BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
		    device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
		goto fail_unload;
	}

	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Refuse to wrap onto the consumer index. */
	if (frag == sc->bge_tx_saved_considx) {
		BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
		    device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));

		goto fail_unload;
	}

	/* Commit: mark the last descriptor and record bookkeeping. */
	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
	sc->txdma[cur] = dma;
	sc->bge_txcnt += dmamap->dm_nsegs;

	*txidx = frag;

	return 0;

fail_unload:
	bus_dmamap_unload(sc->bge_dmatag, dmamap);

	return ENOBUFS;
}
3988:
3989: /*
3990: * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3991: * to the mbuf data regions directly in the transmit descriptors.
3992: */
static void
bge_start(struct ifnet *ifp)
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	uint32_t prodidx;
	int pkts = 0;

	sc = ifp->if_softc;

	/* Bail unless running and not already marked active. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prodidx = sc->bge_tx_prodidx;

	/* Loop while the next producer slot is free. */
	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		/* Peek only: the packet stays queued until encap succeeds. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

#if 0
		/*
		 * XXX
		 * safety overkill.  If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
#endif

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m_head);
	}
	if (pkts == 0)
		return;

	/* Transmit */
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
4069:
/*
 * Bring the interface up: reset and reprogram the chip, reload rings,
 * and start the tick callout. Returns 0 or an errno from block init /
 * media selection.
 */
static int
bge_init(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	const uint16_t *m;
	int s, error = 0;

	s = splnet();

	/*
	 * NOTE(review): ifp is reassigned to &sc->ethercom.ec_if here and
	 * again below; both appear redundant since the argument already is
	 * that ifnet — confirm before removing.
	 */
	ifp = &sc->ethercom.ec_if;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(ifp, 0);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	error = bge_blockinit(sc);
	if (error != 0) {
		aprint_error_dev(sc->bge_dev, "initialization error %d\n",
		    error);
		splx(s);
		return error;
	}

	ifp = &sc->ethercom.ec_if;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load our MAC address. */
	m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	}

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		uint32_t		v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			aprint_error_dev(sc->bge_dev,
			    "5705 A0 chip failed to load RX ring\n");
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

	/* Select media; on failure fall through to splx and return. */
	if ((error = bge_ifmedia_upd(ifp)) != 0)
		goto out;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);

 out:
	splx(s);

	return error;
}
4179:
4180: /*
4181: * Set media options.
4182: */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = &sc->bge_ifmedia;
	int rc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return EINVAL;
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				/* Only reprogram once autoneg has settled. */
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			/* Fixed 1000BASE-SX: just set the duplex bit. */
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return EINVAL;
		}
		/* XXX 802.3x flow control for 1000BASE-SX */
		return 0;
	}

	/* Copper path: let the MII layer do the media change. */
	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
	if ((rc = mii_mediachg(mii)) == ENXIO)
		return 0;

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we are not getting any further interrupts
	 * for link state changes and thus will not UP the link and
	 * not be able to send in bge_start. The only way to get
	 * things working was to receive a packet and get a RX intr.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGE_IS_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return rc;
}
4254:
4255: /*
4256: * Report current media status.
4257: */
1.104 thorpej 4258: static void
4259: bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1.1 fvdl 4260: {
4261: struct bge_softc *sc = ifp->if_softc;
4262: struct mii_data *mii = &sc->bge_mii;
4263:
1.157 msaitoh 4264: if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
1.1 fvdl 4265: ifmr->ifm_status = IFM_AVALID;
4266: ifmr->ifm_active = IFM_ETHER;
4267: if (CSR_READ_4(sc, BGE_MAC_STS) &
4268: BGE_MACSTAT_TBI_PCS_SYNCHED)
4269: ifmr->ifm_status |= IFM_ACTIVE;
4270: ifmr->ifm_active |= IFM_1000_SX;
4271: if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4272: ifmr->ifm_active |= IFM_HDX;
4273: else
4274: ifmr->ifm_active |= IFM_FDX;
4275: return;
4276: }
4277:
4278: mii_pollstat(mii);
4279: ifmr->ifm_status = mii->mii_media_status;
1.69 thorpej 4280: ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4281: sc->bge_flowflags;
1.1 fvdl 4282: }
4283:
/*
 * Handle socket ioctls for the interface.  Runs at splnet() so the
 * softc state cannot change underneath us while we touch it.
 *
 * ifp     - interface being configured
 * command - SIOC* ioctl code
 * data    - ioctl argument (struct ifreq for media/flags requests)
 *
 * Returns 0 on success or an errno value.
 */
static int
bge_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch (command) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, command, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->bge_if_flags & IFF_PROMISC)) {
				/* PROMISC turned on: flip the RX-mode bit only. */
				BGE_SETBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->bge_if_flags & IFF_PROMISC) {
				/* PROMISC turned off: clear the RX-mode bit. */
				BGE_CLRBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (!(sc->bge_if_flags & IFF_UP))
				/* Interface transitioned down->up: full init. */
				bge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bge_stop(ifp, 1);
		}
		/* Remember the flags so the next call can detect deltas. */
		sc->bge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFMEDIA:
		/* XXX Flow control is not supported for 1000BASE-SX */
		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		/* TBI (fiber) media is tracked in bge_ifmedia, copper via MII. */
		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		/* ENETRESET from ether_ioctl means "re-program multicast". */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			break;

		error = 0;

		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			bge_setmulti(sc);
		break;
	}

	splx(s);

	return error;
}
4374:
1.104 thorpej 4375: static void
4376: bge_watchdog(struct ifnet *ifp)
1.1 fvdl 4377: {
4378: struct bge_softc *sc;
4379:
4380: sc = ifp->if_softc;
4381:
1.138 joerg 4382: aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
1.1 fvdl 4383:
4384: ifp->if_flags &= ~IFF_RUNNING;
4385: bge_init(ifp);
4386:
4387: ifp->if_oerrors++;
4388: }
4389:
/*
 * Disable one on-chip state machine: clear `bit' in control register
 * `reg', then busy-wait until the hardware reads the bit back as zero
 * (i.e. the block has actually stopped).
 */
static void
bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	/* Poll for the enable bit to drop, up to BGE_TIMEOUT iterations. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		delay(100);
		/* PCIe parts get an extra wait per iteration. */
		if (sc->bge_flags & BGE_PCIE)
			DELAY(1000);
	}

	/*
	 * Don't complain when the register is BGE_SRS_MODE: that block
	 * is seen to fail to stop in some environments (and once after
	 * boot?) without apparent ill effect, so stay quiet for it.
	 */
	if (reg != BGE_SRS_MODE)
		aprint_error_dev(sc->bge_dev,
		    "block failed to stop: reg 0x%lx, bit 0x%08x\n",
		    (u_long)reg, bit);
}
4414:
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 *
 * The shutdown order follows the chip's dependency chain: receiver
 * blocks, then transmit blocks, then memory managers, then interrupts.
 * `disable' is part of the if_stop interface; it is not examined here.
 */
static void
bge_stop(struct ifnet *ifp, int disable)
{
	struct bge_softc *sc = ifp->if_softc;

	/* Stop the periodic tick callout first so it can't re-arm things. */
	callout_stop(&sc->bge_timeout);

	/*
	 * Disable all of the receiver blocks
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Reset the flow-through queues (write-all-ones, then zero). */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	if (BGE_IS_5700_FAMILY(sc)) {
		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY.
	 */
	if (!(sc->bge_flags & BGE_PHY_FIBER_TBI))
		mii_down(&sc->bge_mii);

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
4499:
/*
 * Process a pending link-state event: update the driver's cached
 * BGE_STS_LINK bit from the PHY/MAC, notify the network stack on
 * TBI (fiber) transitions, and acknowledge the attention in the
 * MAC status register.  Which source we trust depends on the chip
 * revision (see the comments below).
 */
static void
bge_link_upd(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mii_data *mii = &sc->bge_mii;
	uint32_t status;
	int link;

	/* Clear 'pending link event' flag */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii_pollstat(mii);

			/* Track link-up / link-down from the MII status. */
			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);

			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
		/* Fiber/TBI: read link straight from the MAC status word. */
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if_link_state_change(ifp, LINK_STATE_UP);
			}
		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	/*
	 * Discard link events for MII/GMII cards if MI auto-polling disabled.
	 * This should not happen since mii callouts are locked now, but
	 * we keep this check for debug.
	 */
	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
		/*
		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED
		 * bit in status word always set. Workaround this bug by
		 * reading PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)?
		    BGE_STS_LINK : 0;

		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
			mii_pollstat(mii);

			/* Same link-tracking logic as the 5700 path above. */
			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
		}
	}

	/* Clear the attention */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
}
4595:
1.64 jonathan 4596: static int
4597: sysctl_bge_verify(SYSCTLFN_ARGS)
4598: {
4599: int error, t;
4600: struct sysctlnode node;
4601:
4602: node = *rnode;
4603: t = *(int*)rnode->sysctl_data;
4604: node.sysctl_data = &t;
4605: error = sysctl_lookup(SYSCTLFN_CALL(&node));
4606: if (error || newp == NULL)
1.170 msaitoh 4607: return error;
1.64 jonathan 4608:
4609: #if 0
4610: DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
4611: node.sysctl_num, rnode->sysctl_num));
4612: #endif
4613:
4614: if (node.sysctl_num == bge_rxthresh_nodenum) {
4615: if (t < 0 || t >= NBGE_RX_THRESH)
1.170 msaitoh 4616: return EINVAL;
1.64 jonathan 4617: bge_update_all_threshes(t);
4618: } else
1.170 msaitoh 4619: return EINVAL;
1.64 jonathan 4620:
4621: *(int*)rnode->sysctl_data = t;
4622:
1.170 msaitoh 4623: return 0;
1.64 jonathan 4624: }
4625:
4626: /*
1.65 atatat 4627: * Set up sysctl(3) MIB, hw.bge.*.
1.64 jonathan 4628: *
4629: * TBD condition SYSCTL_PERMANENT on being an LKM or not
4630: */
SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
{
	int rc, bge_root_num;
	const struct sysctlnode *node;

	/* Ensure the top-level "hw" node exists. */
	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	/* Create the hw.bge container node. */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge",
	    SYSCTL_DESCR("BGE interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	/* Remember the dynamically assigned node number for children. */
	bge_root_num = node->sysctl_num;

	/* BGE Rx interrupt mitigation level */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_lvl",
	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
	    sysctl_bge_verify, 0,
	    &bge_rx_thresh_lvl,
	    0, CTL_HW, bge_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	/* Saved so sysctl_bge_verify() can recognize writes to this node. */
	bge_rxthresh_nodenum = node->sysctl_num;

	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
1.151 cegger 4670:
1.172 msaitoh 4671: #ifdef BGE_DEBUG
4672: void
4673: bge_debug_info(struct bge_softc *sc)
4674: {
4675:
4676: printf("Hardware Flags:\n");
4677: if (BGE_IS_5755_PLUS(sc))
4678: printf(" - 5755 Plus\n");
4679: if (BGE_IS_5750_OR_BEYOND(sc))
4680: printf(" - 5750 Plus\n");
4681: if (BGE_IS_5705_PLUS(sc))
4682: printf(" - 5705 Plus\n");
4683: if (BGE_IS_5714_FAMILY(sc))
4684: printf(" - 5714 Family\n");
4685: if (BGE_IS_5700_FAMILY(sc))
4686: printf(" - 5700 Family\n");
4687: if (sc->bge_flags & BGE_IS_5788)
4688: printf(" - 5788\n");
4689: if (sc->bge_flags & BGE_JUMBO_CAPABLE)
4690: printf(" - Supports Jumbo Frames\n");
4691: if (sc->bge_flags & BGE_NO_EEPROM)
1.173 msaitoh 4692: printf(" - No EEPROM\n");
1.172 msaitoh 4693: if (sc->bge_flags & BGE_PCIX)
4694: printf(" - PCI-X Bus\n");
4695: if (sc->bge_flags & BGE_PCIE)
4696: printf(" - PCI Express Bus\n");
4697: if (sc->bge_flags & BGE_NO_3LED)
4698: printf(" - No 3 LEDs\n");
4699: if (sc->bge_flags & BGE_RX_ALIGNBUG)
4700: printf(" - RX Alignment Bug\n");
4701: if (sc->bge_flags & BGE_TSO)
4702: printf(" - TSO\n");
4703: }
4704: #endif /* BGE_DEBUG */
4705:
4706: static int
4707: bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
4708: {
4709: prop_dictionary_t dict;
4710: prop_data_t ea;
4711:
4712: if ((sc->bge_flags & BGE_NO_EEPROM) == 0)
4713: return 1;
4714:
4715: dict = device_properties(sc->bge_dev);
4716: ea = prop_dictionary_get(dict, "mac-address");
4717: if (ea != NULL) {
4718: KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
4719: KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
4720: memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
4721: return 0;
4722: }
4723:
4724: return 1;
4725: }
4726:
/*
 * Try to read the station address out of NIC-local memory at offset
 * 0x0c14/0x0c18.  The upper 16 bits of the first word must contain
 * the 0x484b ("HK") signature for the data to be considered valid.
 * Returns 0 on success, 1 if no valid address was found.
 */
static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
	uint32_t word;

	word = bge_readmem_ind(sc, 0x0c14);
	if ((word >> 16) != 0x484b)
		return 1;

	/* First word carries bytes 0-1 in its low half... */
	ether_addr[0] = (uint8_t)(word >> 8);
	ether_addr[1] = (uint8_t)word;

	/* ...second word carries bytes 2-5. */
	word = bge_readmem_ind(sc, 0x0c18);
	ether_addr[2] = (uint8_t)(word >> 24);
	ether_addr[3] = (uint8_t)(word >> 16);
	ether_addr[4] = (uint8_t)(word >> 8);
	ether_addr[5] = (uint8_t)word;
	return 0;
}
4745:
4746: static int
1.170 msaitoh 4747: bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
1.151 cegger 4748: {
4749: int mac_offset = BGE_EE_MAC_OFFSET;
4750:
4751: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
4752: mac_offset = BGE_EE_MAC_OFFSET_5906;
4753: }
4754:
4755: return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
4756: ETHER_ADDR_LEN));
4757: }
4758:
4759: static int
1.170 msaitoh 4760: bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
1.151 cegger 4761: {
4762:
1.170 msaitoh 4763: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
4764: return 1;
1.151 cegger 4765:
4766: return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4767: ETHER_ADDR_LEN));
4768: }
4769:
4770: static int
1.170 msaitoh 4771: bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
1.151 cegger 4772: {
4773: static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
4774: /* NOTE: Order is critical */
1.172 msaitoh 4775: bge_get_eaddr_fw,
1.151 cegger 4776: bge_get_eaddr_mem,
4777: bge_get_eaddr_nvram,
4778: bge_get_eaddr_eeprom,
4779: NULL
4780: };
4781: const bge_eaddr_fcn_t *func;
4782:
4783: for (func = bge_eaddr_funcs; *func != NULL; ++func) {
4784: if ((*func)(sc, eaddr) == 0)
4785: break;
4786: }
4787: return (*func == NULL ? ENXIO : 0);
4788: }
CVSweb <webmaster@jp.NetBSD.org>