Annotation of src/sys/dev/pci/if_bge.c, Revision 1.378
1.378 ! skrll 1: /* $NetBSD: if_bge.c,v 1.377 2022/08/14 09:04:17 skrll Exp $ */
1.8 thorpej 2:
1.1 fvdl 3: /*
4: * Copyright (c) 2001 Wind River Systems
5: * Copyright (c) 1997, 1998, 1999, 2001
6: * Bill Paul <wpaul@windriver.com>. All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
17: * must display the following acknowledgement:
18: * This product includes software developed by Bill Paul.
19: * 4. Neither the name of the author nor the names of any co-contributors
20: * may be used to endorse or promote products derived from this software
21: * without specific prior written permission.
22: *
23: * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26: * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33: * THE POSSIBILITY OF SUCH DAMAGE.
34: *
35: * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
36: */
37:
38: /*
1.12 thorpej 39: * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
1.1 fvdl 40: *
1.12 thorpej 41: * NetBSD version by:
42: *
43: * Frank van der Linden <fvdl@wasabisystems.com>
44: * Jason Thorpe <thorpej@wasabisystems.com>
1.32 tron 45: * Jonathan Stone <jonathan@dsg.stanford.edu>
1.12 thorpej 46: *
47: * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
1.1 fvdl 48: * Senior Engineer, Wind River Systems
49: */
50:
51: /*
52: * The Broadcom BCM5700 is based on technology originally developed by
53: * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
1.203 msaitoh 54: * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
1.1 fvdl 55: * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
56: * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
57: * frames, highly configurable RX filtering, and 16 RX and TX queues
58: * (which, along with RX filter rules, can be used for QOS applications).
59: * Other features, such as TCP segmentation, may be available as part
60: * of value-added firmware updates. Unlike the Tigon I and Tigon II,
61: * firmware images can be stored in hardware and need not be compiled
62: * into the driver.
63: *
64: * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
1.33 tsutsui 65: * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
1.1 fvdl 66: *
67: * The BCM5701 is a single-chip solution incorporating both the BCM5700
1.25 jonathan 68: * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
1.1 fvdl 69: * does not support external SSRAM.
70: *
71: * Broadcom also produces a variation of the BCM5700 under the "Altima"
72: * brand name, which is functionally similar but lacks PCI-X support.
73: *
74: * Without external SSRAM, you can only have at most 4 TX rings,
75: * and the use of the mini RX ring is disabled. This seems to imply
76: * that these features are simply not available on the BCM5701. As a
77: * result, this driver does not implement any support for the mini RX
78: * ring.
79: */
1.43 lukem 80:
81: #include <sys/cdefs.h>
1.378 ! skrll 82: __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.377 2022/08/14 09:04:17 skrll Exp $");
1.1 fvdl 83:
84: #include <sys/param.h>
1.370 skrll 85: #include <sys/types.h>
1.355 skrll 86:
1.1 fvdl 87: #include <sys/callout.h>
1.355 skrll 88: #include <sys/device.h>
1.364 skrll 89: #include <sys/kernel.h>
1.366 skrll 90: #include <sys/kmem.h>
1.1 fvdl 91: #include <sys/mbuf.h>
1.355 skrll 92: #include <sys/rndsource.h>
1.1 fvdl 93: #include <sys/socket.h>
1.355 skrll 94: #include <sys/sockio.h>
1.64 jonathan 95: #include <sys/sysctl.h>
1.355 skrll 96: #include <sys/systm.h>
1.1 fvdl 97:
98: #include <net/if.h>
99: #include <net/if_dl.h>
100: #include <net/if_media.h>
101: #include <net/if_ether.h>
1.330 msaitoh 102: #include <net/bpf.h>
1.148 mlelstv 103:
1.1 fvdl 104: #ifdef INET
105: #include <netinet/in.h>
106: #include <netinet/in_systm.h>
107: #include <netinet/in_var.h>
108: #include <netinet/ip.h>
109: #endif
110:
1.247 msaitoh 111: /* Headers for TCP Segmentation Offload (TSO) */
1.95 jonathan 112: #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */
113: #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */
114: #include <netinet/ip.h> /* for struct ip */
115: #include <netinet/tcp.h> /* for struct tcphdr */
116:
1.1 fvdl 117: #include <dev/pci/pcireg.h>
118: #include <dev/pci/pcivar.h>
119: #include <dev/pci/pcidevs.h>
120:
121: #include <dev/mii/mii.h>
122: #include <dev/mii/miivar.h>
123: #include <dev/mii/miidevs.h>
124: #include <dev/mii/brgphyreg.h>
125:
126: #include <dev/pci/if_bgereg.h>
1.164 msaitoh 127: #include <dev/pci/if_bgevar.h>
1.1 fvdl 128:
1.164 msaitoh 129: #include <prop/proplib.h>
1.1 fvdl 130:
1.46 jonathan 131: #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
132:
1.63 jonathan 133:
134: /*
135: * Tunable thresholds for rx-side bge interrupt mitigation.
136: */
137:
138: /*
139: * The pairs of values below were obtained from empirical measurement
 140: * on bcm5700 rev B2; they are designed to give roughly 1 receive
141: * interrupt for every N packets received, where N is, approximately,
142: * the second value (rx_max_bds) in each pair. The values are chosen
143: * such that moving from one pair to the succeeding pair was observed
144: * to roughly halve interrupt rate under sustained input packet load.
145: * The values were empirically chosen to avoid overflowing internal
1.184 njoly 146: * limits on the bcm5700: increasing rx_ticks much beyond 600
1.63 jonathan 147: * results in internal wrapping and higher interrupt rates.
148: * The limit of 46 frames was chosen to match NFS workloads.
1.87 perry 149: *
1.63 jonathan 150: * These values also work well on bcm5701, bcm5704C, and (less
151: * tested) bcm5703. On other chipsets, (including the Altima chip
152: * family), the larger values may overflow internal chip limits,
153: * leading to increasing interrupt rates rather than lower interrupt
154: * rates.
155: *
156: * Applications using heavy interrupt mitigation (interrupting every
157: * 32 or 46 frames) in both directions may need to increase the TCP
158: * windowsize to above 131072 bytes (e.g., to 199608 bytes) to sustain
1.87 perry 159: * full link bandwidth, due to ACKs and window updates lingering
1.63 jonathan 160: * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
161: */
/*
 * Rx interrupt-mitigation table (see the block comment above for how the
 * values were derived).  Each step roughly halves the interrupt rate of
 * the previous one under sustained load.
 */
static const struct bge_load_rx_thresh {
	int rx_ticks;	/* rx coalescing ticks */
	int rx_max_bds; }	/* max rx BDs before forcing an interrupt */
bge_rx_threshes[] = {
	{ 16,	1 },	/* rx_max_bds = 1 disables interrupt mitigation */
	{ 32,	2 },
	{ 50,	4 },
	{ 100,	8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
/* Number of entries in the mitigation table above. */
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
175:
/* XXX patchable; should be sysctl'able */
/* NOTE(review): presumably selects automatic threshold stepping — confirm
 * against the code that consumes it (not visible in this chunk). */
static int bge_auto_thresh = 1;
/* Current index into bge_rx_threshes[]. */
static int bge_rx_thresh_lvl;

/* Sysctl node number for the rx-threshold knob (see bge_sysctl_init). */
static int bge_rxthresh_nodenum;
1.1 fvdl 181:
1.170 msaitoh 182: typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
1.151 cegger 183:
1.237 msaitoh 184: static uint32_t bge_chipid(const struct pci_attach_args *);
1.288 msaitoh 185: static int bge_can_use_msi(struct bge_softc *);
1.177 msaitoh 186: static int bge_probe(device_t, cfdata_t, void *);
187: static void bge_attach(device_t, device_t, void *);
1.227 msaitoh 188: static int bge_detach(device_t, int);
1.177 msaitoh 189: static void bge_release_resources(struct bge_softc *);
190:
191: static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
192: static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
193: static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
194: static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
195: static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
196:
197: static void bge_txeof(struct bge_softc *);
1.219 msaitoh 198: static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
1.177 msaitoh 199: static void bge_rxeof(struct bge_softc *);
200:
201: static void bge_asf_driver_up (struct bge_softc *);
202: static void bge_tick(void *);
203: static void bge_stats_update(struct bge_softc *);
204: static void bge_stats_update_regs(struct bge_softc *);
205: static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
206:
207: static int bge_intr(void *);
208: static void bge_start(struct ifnet *);
1.375 skrll 209: static void bge_start_locked(struct ifnet *);
1.186 msaitoh 210: static int bge_ifflags_cb(struct ethercom *);
1.177 msaitoh 211: static int bge_ioctl(struct ifnet *, u_long, void *);
212: static int bge_init(struct ifnet *);
1.375 skrll 213: static int bge_init_locked(struct ifnet *);
1.177 msaitoh 214: static void bge_stop(struct ifnet *, int);
1.375 skrll 215: static void bge_stop_locked(struct ifnet *, int);
216: static bool bge_watchdog(struct ifnet *);
1.177 msaitoh 217: static int bge_ifmedia_upd(struct ifnet *);
218: static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
1.375 skrll 219: static void bge_handle_reset_work(struct work *, void *);
1.177 msaitoh 220:
221: static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
222: static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);
223:
224: static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
225: static int bge_read_eeprom(struct bge_softc *, void *, int, int);
226: static void bge_setmulti(struct bge_softc *);
1.104 thorpej 227:
1.177 msaitoh 228: static void bge_handle_events(struct bge_softc *);
229: static int bge_alloc_jumbo_mem(struct bge_softc *);
230: static void bge_free_jumbo_mem(struct bge_softc *);
231: static void *bge_jalloc(struct bge_softc *);
232: static void bge_jfree(struct mbuf *, void *, size_t, void *);
233: static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
234: static int bge_init_rx_ring_jumbo(struct bge_softc *);
235: static void bge_free_rx_ring_jumbo(struct bge_softc *);
1.376 skrll 236:
237: static int bge_newbuf_std(struct bge_softc *, int);
238: static int bge_init_rx_ring_std(struct bge_softc *);
239: static void bge_fill_rx_ring_std(struct bge_softc *);
240: static void bge_free_rx_ring_std(struct bge_softc *m);
241:
1.320 bouyer 242: static void bge_free_tx_ring(struct bge_softc *m, bool);
1.177 msaitoh 243: static int bge_init_tx_ring(struct bge_softc *);
244:
245: static int bge_chipinit(struct bge_softc *);
246: static int bge_blockinit(struct bge_softc *);
1.216 msaitoh 247: static int bge_phy_addr(struct bge_softc *);
1.177 msaitoh 248: static uint32_t bge_readmem_ind(struct bge_softc *, int);
249: static void bge_writemem_ind(struct bge_softc *, int, int);
250: static void bge_writembx(struct bge_softc *, int, int);
1.211 msaitoh 251: static void bge_writembx_flush(struct bge_softc *, int, int);
1.177 msaitoh 252: static void bge_writemem_direct(struct bge_softc *, int, int);
253: static void bge_writereg_ind(struct bge_softc *, int, int);
254: static void bge_set_max_readrq(struct bge_softc *);
255:
1.322 msaitoh 256: static int bge_miibus_readreg(device_t, int, int, uint16_t *);
257: static int bge_miibus_writereg(device_t, int, int, uint16_t);
1.201 matt 258: static void bge_miibus_statchg(struct ifnet *);
1.177 msaitoh 259:
1.216 msaitoh 260: #define BGE_RESET_SHUTDOWN 0
261: #define BGE_RESET_START 1
262: #define BGE_RESET_SUSPEND 2
1.177 msaitoh 263: static void bge_sig_post_reset(struct bge_softc *, int);
264: static void bge_sig_legacy(struct bge_softc *, int);
265: static void bge_sig_pre_reset(struct bge_softc *, int);
1.216 msaitoh 266: static void bge_wait_for_event_ack(struct bge_softc *);
1.177 msaitoh 267: static void bge_stop_fw(struct bge_softc *);
268: static int bge_reset(struct bge_softc *);
269: static void bge_link_upd(struct bge_softc *);
1.207 msaitoh 270: static void bge_sysctl_init(struct bge_softc *);
271: static int bge_sysctl_verify(SYSCTLFN_PROTO);
1.95 jonathan 272:
1.216 msaitoh 273: static void bge_ape_lock_init(struct bge_softc *);
274: static void bge_ape_read_fw_ver(struct bge_softc *);
275: static int bge_ape_lock(struct bge_softc *, int);
276: static void bge_ape_unlock(struct bge_softc *, int);
277: static void bge_ape_send_event(struct bge_softc *, uint32_t);
278: static void bge_ape_driver_state_change(struct bge_softc *, int);
279:
1.1 fvdl 280: #ifdef BGE_DEBUG
281: #define DPRINTF(x) if (bgedebug) printf x
1.331 msaitoh 282: #define DPRINTFN(n, x) if (bgedebug >= (n)) printf x
1.95 jonathan 283: #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0)
1.1 fvdl 284: int bgedebug = 0;
1.95 jonathan 285: int bge_tso_debug = 0;
1.369 skrll 286: void bge_debug_info(struct bge_softc *);
1.1 fvdl 287: #else
288: #define DPRINTF(x)
1.331 msaitoh 289: #define DPRINTFN(n, x)
1.95 jonathan 290: #define BGE_TSO_PRINTF(x)
1.1 fvdl 291: #endif
292:
1.72 thorpej 293: #ifdef BGE_EVENT_COUNTERS
294: #define BGE_EVCNT_INCR(ev) (ev).ev_count++
295: #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val)
296: #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val)
297: #else
298: #define BGE_EVCNT_INCR(ev) /* nothing */
299: #define BGE_EVCNT_ADD(ev, val) /* nothing */
300: #define BGE_EVCNT_UPD(ev, val) /* nothing */
301: #endif
302:
1.325 msaitoh 303: #define VIDDID(a, b) PCI_VENDOR_ ## a, PCI_PRODUCT_ ## a ## _ ## b
304: /*
305: * The BCM5700 documentation seems to indicate that the hardware still has the
306: * Alteon vendor ID burned into it, though it should always be overridden by
307: * the value in the EEPROM. We'll check for it anyway.
308: */
/*
 * Table of PCI vendor/product IDs this driver attaches to, with the
 * human-readable name printed at attach time.  Terminated by an
 * all-zero entry.
 */
static const struct bge_product {
	pci_vendor_id_t		bp_vendor;	/* PCI vendor ID */
	pci_product_id_t	bp_product;	/* PCI product ID */
	const char		*bp_name;	/* name for dmesg */
} bge_products[] = {
	{ VIDDID(ALTEON, BCM5700),	"Broadcom BCM5700 Gigabit" },
	{ VIDDID(ALTEON, BCM5701),	"Broadcom BCM5701 Gigabit" },
	{ VIDDID(ALTIMA, AC1000),	"Altima AC1000 Gigabit" },
	{ VIDDID(ALTIMA, AC1001),	"Altima AC1001 Gigabit" },
	{ VIDDID(ALTIMA, AC1003),	"Altima AC1003 Gigabit" },
	{ VIDDID(ALTIMA, AC9100),	"Altima AC9100 Gigabit" },
	{ VIDDID(APPLE, BCM5701),	"APPLE BCM5701 Gigabit" },
	{ VIDDID(BROADCOM, BCM5700),	"Broadcom BCM5700 Gigabit" },
	{ VIDDID(BROADCOM, BCM5701),	"Broadcom BCM5701 Gigabit" },
	{ VIDDID(BROADCOM, BCM5702),	"Broadcom BCM5702 Gigabit" },
	{ VIDDID(BROADCOM, BCM5702FE),	"Broadcom BCM5702FE Fast" },
	{ VIDDID(BROADCOM, BCM5702X),	"Broadcom BCM5702X Gigabit" },
	{ VIDDID(BROADCOM, BCM5703),	"Broadcom BCM5703 Gigabit" },
	{ VIDDID(BROADCOM, BCM5703X),	"Broadcom BCM5703X Gigabit" },
	{ VIDDID(BROADCOM, BCM5703_ALT),"Broadcom BCM5703 Gigabit" },
	{ VIDDID(BROADCOM, BCM5704C),	"Broadcom BCM5704C Dual Gigabit" },
	{ VIDDID(BROADCOM, BCM5704S),	"Broadcom BCM5704S Dual Gigabit" },
	{ VIDDID(BROADCOM, BCM5704S_ALT),"Broadcom BCM5704S Dual Gigabit" },
	{ VIDDID(BROADCOM, BCM5705),	"Broadcom BCM5705 Gigabit" },
	{ VIDDID(BROADCOM, BCM5705F),	"Broadcom BCM5705F Gigabit" },
	{ VIDDID(BROADCOM, BCM5705K),	"Broadcom BCM5705K Gigabit" },
	{ VIDDID(BROADCOM, BCM5705M),	"Broadcom BCM5705M Gigabit" },
	{ VIDDID(BROADCOM, BCM5705M_ALT),"Broadcom BCM5705M Gigabit" },
	{ VIDDID(BROADCOM, BCM5714),	"Broadcom BCM5714 Gigabit" },
	{ VIDDID(BROADCOM, BCM5714S),	"Broadcom BCM5714S Gigabit" },
	{ VIDDID(BROADCOM, BCM5715),	"Broadcom BCM5715 Gigabit" },
	{ VIDDID(BROADCOM, BCM5715S),	"Broadcom BCM5715S Gigabit" },
	{ VIDDID(BROADCOM, BCM5717),	"Broadcom BCM5717 Gigabit" },
	{ VIDDID(BROADCOM, BCM5717C),	"Broadcom BCM5717 Gigabit" },
	{ VIDDID(BROADCOM, BCM5718),	"Broadcom BCM5718 Gigabit" },
	{ VIDDID(BROADCOM, BCM5719),	"Broadcom BCM5719 Gigabit" },
	{ VIDDID(BROADCOM, BCM5720),	"Broadcom BCM5720 Gigabit" },
	{ VIDDID(BROADCOM, BCM5721),	"Broadcom BCM5721 Gigabit" },
	{ VIDDID(BROADCOM, BCM5722),	"Broadcom BCM5722 Gigabit" },
	{ VIDDID(BROADCOM, BCM5723),	"Broadcom BCM5723 Gigabit" },
	{ VIDDID(BROADCOM, BCM5725),	"Broadcom BCM5725 Gigabit" },
	{ VIDDID(BROADCOM, BCM5727),	"Broadcom BCM5727 Gigabit" },
	{ VIDDID(BROADCOM, BCM5750),	"Broadcom BCM5750 Gigabit" },
	{ VIDDID(BROADCOM, BCM5751),	"Broadcom BCM5751 Gigabit" },
	{ VIDDID(BROADCOM, BCM5751F),	"Broadcom BCM5751F Gigabit" },
	{ VIDDID(BROADCOM, BCM5751M),	"Broadcom BCM5751M Gigabit" },
	{ VIDDID(BROADCOM, BCM5752),	"Broadcom BCM5752 Gigabit" },
	{ VIDDID(BROADCOM, BCM5752M),	"Broadcom BCM5752M Gigabit" },
	{ VIDDID(BROADCOM, BCM5753),	"Broadcom BCM5753 Gigabit" },
	{ VIDDID(BROADCOM, BCM5753F),	"Broadcom BCM5753F Gigabit" },
	{ VIDDID(BROADCOM, BCM5753M),	"Broadcom BCM5753M Gigabit" },
	{ VIDDID(BROADCOM, BCM5754),	"Broadcom BCM5754 Gigabit" },
	{ VIDDID(BROADCOM, BCM5754M),	"Broadcom BCM5754M Gigabit" },
	{ VIDDID(BROADCOM, BCM5755),	"Broadcom BCM5755 Gigabit" },
	{ VIDDID(BROADCOM, BCM5755M),	"Broadcom BCM5755M Gigabit" },
	{ VIDDID(BROADCOM, BCM5756),	"Broadcom BCM5756 Gigabit" },
	{ VIDDID(BROADCOM, BCM5761),	"Broadcom BCM5761 Gigabit" },
	{ VIDDID(BROADCOM, BCM5761E),	"Broadcom BCM5761E Gigabit" },
	{ VIDDID(BROADCOM, BCM5761S),	"Broadcom BCM5761S Gigabit" },
	{ VIDDID(BROADCOM, BCM5761SE),	"Broadcom BCM5761SE Gigabit" },
	{ VIDDID(BROADCOM, BCM5762),	"Broadcom BCM5762 Gigabit" },
	{ VIDDID(BROADCOM, BCM5764),	"Broadcom BCM5764 Gigabit" },
	{ VIDDID(BROADCOM, BCM5780),	"Broadcom BCM5780 Gigabit" },
	{ VIDDID(BROADCOM, BCM5780S),	"Broadcom BCM5780S Gigabit" },
	{ VIDDID(BROADCOM, BCM5781),	"Broadcom BCM5781 Gigabit" },
	{ VIDDID(BROADCOM, BCM5782),	"Broadcom BCM5782 Gigabit" },
	{ VIDDID(BROADCOM, BCM5784M),	"BCM5784M NetLink 1000baseT" },
	{ VIDDID(BROADCOM, BCM5785F),	"BCM5785F NetLink 10/100" },
	{ VIDDID(BROADCOM, BCM5785G),	"BCM5785G NetLink 1000baseT" },
	{ VIDDID(BROADCOM, BCM5786),	"Broadcom BCM5786 Gigabit" },
	{ VIDDID(BROADCOM, BCM5787),	"Broadcom BCM5787 Gigabit" },
	{ VIDDID(BROADCOM, BCM5787F),	"Broadcom BCM5787F 10/100" },
	{ VIDDID(BROADCOM, BCM5787M),	"Broadcom BCM5787M Gigabit" },
	{ VIDDID(BROADCOM, BCM5788),	"Broadcom BCM5788 Gigabit" },
	{ VIDDID(BROADCOM, BCM5789),	"Broadcom BCM5789 Gigabit" },
	{ VIDDID(BROADCOM, BCM5901),	"Broadcom BCM5901 Fast" },
	{ VIDDID(BROADCOM, BCM5901A2),	"Broadcom BCM5901A2 Fast" },
	{ VIDDID(BROADCOM, BCM5903M),	"Broadcom BCM5903M Fast" },
	{ VIDDID(BROADCOM, BCM5906),	"Broadcom BCM5906 Fast" },
	{ VIDDID(BROADCOM, BCM5906M),	"Broadcom BCM5906M Fast" },
	{ VIDDID(BROADCOM, BCM57760),	"Broadcom BCM57760 Gigabit" },
	{ VIDDID(BROADCOM, BCM57761),	"Broadcom BCM57761 Gigabit" },
	{ VIDDID(BROADCOM, BCM57762),	"Broadcom BCM57762 Gigabit" },
	{ VIDDID(BROADCOM, BCM57764),	"Broadcom BCM57764 Gigabit" },
	{ VIDDID(BROADCOM, BCM57765),	"Broadcom BCM57765 Gigabit" },
	{ VIDDID(BROADCOM, BCM57766),	"Broadcom BCM57766 Gigabit" },
	{ VIDDID(BROADCOM, BCM57767),	"Broadcom BCM57767 Gigabit" },
	{ VIDDID(BROADCOM, BCM57780),	"Broadcom BCM57780 Gigabit" },
	{ VIDDID(BROADCOM, BCM57781),	"Broadcom BCM57781 Gigabit" },
	{ VIDDID(BROADCOM, BCM57782),	"Broadcom BCM57782 Gigabit" },
	{ VIDDID(BROADCOM, BCM57785),	"Broadcom BCM57785 Gigabit" },
	{ VIDDID(BROADCOM, BCM57786),	"Broadcom BCM57786 Gigabit" },
	{ VIDDID(BROADCOM, BCM57787),	"Broadcom BCM57787 Gigabit" },
	{ VIDDID(BROADCOM, BCM57788),	"Broadcom BCM57788 Gigabit" },
	{ VIDDID(BROADCOM, BCM57790),	"Broadcom BCM57790 Gigabit" },
	{ VIDDID(BROADCOM, BCM57791),	"Broadcom BCM57791 Gigabit" },
	{ VIDDID(BROADCOM, BCM57795),	"Broadcom BCM57795 Gigabit" },
	{ VIDDID(SCHNEIDERKOCH, SK_9DX1),"SysKonnect SK-9Dx1 Gigabit" },
	{ VIDDID(SCHNEIDERKOCH, SK_9MXX),"SysKonnect SK-9Mxx Gigabit" },
	{ VIDDID(3COM, 3C996),		"3Com 3c996 Gigabit" },
	{ VIDDID(FUJITSU4, PW008GE4),	"Fujitsu PW008GE4 Gigabit" },
	{ VIDDID(FUJITSU4, PW008GE5),	"Fujitsu PW008GE5 Gigabit" },
	{ VIDDID(FUJITSU4, PP250_450_LAN),"Fujitsu Primepower 250/450 Gigabit" },
	{ 0, 0, NULL },	/* terminator */
};
414:
1.261 msaitoh 415: #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGEF_JUMBO_CAPABLE)
416: #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGEF_5700_FAMILY)
417: #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGEF_5705_PLUS)
418: #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGEF_5714_FAMILY)
419: #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGEF_575X_PLUS)
420: #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGEF_5755_PLUS)
421: #define BGE_IS_57765_FAMILY(sc) ((sc)->bge_flags & BGEF_57765_FAMILY)
422: #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGEF_57765_PLUS)
423: #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGEF_5717_PLUS)
1.166 msaitoh 424:
/*
 * Known chip revisions, mapping the full chip ID to a printable
 * stepping name.  Terminated by a zero entry; unknown steppings fall
 * back to bge_majorrevs[] below.
 */
static const struct bge_revision {
	uint32_t	br_chipid;	/* full chip ID (ASIC rev + stepping) */
	const char	*br_name;	/* printable revision name */
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
	{ BGE_CHIPID_BCM5762_B0, "BCM5762 B0" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	{ BGE_CHIPID_BCM5784_B0, "BCM5784 B0" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A0, "BCM5906 A0" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
	{ BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }	/* terminator */
};
502:
503: /*
504: * Some defaults for major revisions, so that newer steppings
505: * that we don't know about have a shot at working.
506: */
/*
 * Fallback table keyed on ASIC (major) revision only, so that newer
 * steppings the driver doesn't know about still get a usable name.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
	{ BGE_ASICREV_BCM5762, "unknown BCM5762" },

	{ 0, NULL }	/* terminator */
};
1.17 thorpej 535:
/* NOTE(review): presumably permits ASF firmware to stay active across
 * driver resets — confirm against bge_reset()/bge_stop_fw() users. */
static int bge_allow_asf = 1;

/* Watchdog timeout, in seconds; overridable at build time. */
#ifndef BGE_WATCHDOG_TIMEOUT
#define BGE_WATCHDOG_TIMEOUT 5
#endif
static int bge_watchdog_timeout = BGE_WATCHDOG_TIMEOUT;
542:
543:
1.227 msaitoh 544: CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc),
545: bge_probe, bge_attach, bge_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1.1 fvdl 546:
1.170 msaitoh 547: static uint32_t
1.104 thorpej 548: bge_readmem_ind(struct bge_softc *sc, int off)
1.1 fvdl 549: {
550: pcireg_t val;
551:
1.216 msaitoh 552: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
553: off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
554: return 0;
555:
1.141 jmcneill 556: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
557: val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
1.216 msaitoh 558: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
1.1 fvdl 559: return val;
560: }
561:
1.104 thorpej 562: static void
563: bge_writemem_ind(struct bge_softc *sc, int off, int val)
1.1 fvdl 564: {
1.216 msaitoh 565:
1.141 jmcneill 566: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
567: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
1.216 msaitoh 568: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
1.1 fvdl 569: }
570:
1.177 msaitoh 571: /*
572: * PCI Express only
573: */
574: static void
575: bge_set_max_readrq(struct bge_softc *sc)
576: {
577: pcireg_t val;
578:
1.180 msaitoh 579: val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
1.238 msaitoh 580: + PCIE_DCSR);
581: val &= ~PCIE_DCSR_MAX_READ_REQ;
1.216 msaitoh 582: switch (sc->bge_expmrq) {
583: case 2048:
584: val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048;
585: break;
586: case 4096:
1.177 msaitoh 587: val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
1.216 msaitoh 588: break;
589: default:
590: panic("incorrect expmrq value(%d)", sc->bge_expmrq);
591: break;
1.177 msaitoh 592: }
1.216 msaitoh 593: pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
1.238 msaitoh 594: + PCIE_DCSR, val);
1.177 msaitoh 595: }
596:
#ifdef notdef
/*
 * Read a NIC register indirectly through the PCI register window in
 * configuration space.  Currently compiled out (notdef).
 */
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA);
}
#endif
605:
1.104 thorpej 606: static void
607: bge_writereg_ind(struct bge_softc *sc, int off, int val)
1.1 fvdl 608: {
1.141 jmcneill 609: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
610: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
1.1 fvdl 611: }
612:
/* Write NIC memory directly through the memory-mapped register BAR. */
static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}
618:
619: static void
620: bge_writembx(struct bge_softc *sc, int off, int val)
621: {
622: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
623: off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
624:
625: CSR_WRITE_4(sc, off, val);
626: }
627:
1.211 msaitoh 628: static void
629: bge_writembx_flush(struct bge_softc *sc, int off, int val)
630: {
631: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
632: off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
633:
634: CSR_WRITE_4_FLUSH(sc, off, val);
635: }
636:
/*
 * Clear all stale locks and select the lock for this driver instance.
 */
void
bge_ape_lock_init(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, regbase;
	int i;

	/* The 5761 keeps its lock-grant registers at a different base. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			/* PHY locks are always granted/released as driver 0. */
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			/* Other locks are per PCI function. */
			if (pa->pa_function == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << pa->pa_function);
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (pa->pa_function) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		printf("%s: PHY lock not supported on function\n",
		    device_xname(sc->bge_dev));
		break;
	}
}
690:
/*
 * Check for APE firmware, set flags, and print version info.
 */
void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		printf("%s: APE signature found but FW status not ready! "
		    "0x%08x\n", device_xname(sc->bge_dev), apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}
736:
/*
 * Acquire APE lock 'locknum' for this driver instance.
 * Returns 0 on success (or when no APE firmware is present),
 * EINVAL for an unsupported lock number, or EBUSY if the grant
 * is not observed within ~1 second.
 */
int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return 0;

	/* Lock request/grant registers have different bases. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return 0;
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return EINVAL;
	}

	/* Request a lock. */
	APE_WRITE_4_FLUSH(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock (20000 * 50us). */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		printf("%s: APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    device_xname(sc->bge_dev),
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return EBUSY;
	}

	return 0;
}
818:
/*
 * Release an APE lock previously acquired with bge_ape_lock().
 * The grant-register bit written must match the one used for the
 * corresponding request.
 */
void
bge_ape_unlock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt;
	int off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* The 5761 keeps its grant registers at a different base. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		gnt = BGE_APE_LOCK_GRANT;
	else
		gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* The 5761 never takes the GPIO lock, so nothing to drop. */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return;
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	/* Write and flush for consecutive bge_ape_lock() */
	APE_WRITE_4_FLUSH(sc, gnt + off, bit);
}
870:
/*
 * Send an event to the APE firmware.
 */
void
bge_ape_send_event(struct bge_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		/* The APE event status must be read under the MEM lock. */
		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			/* Post the event, then ring the APE doorbell. */
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0) {
		printf("%s: APE event 0x%08x send timed out\n",
		    device_xname(sc->bge_dev), event);
	}
}
904:
/*
 * Inform the APE firmware of a driver state transition (start,
 * shutdown or suspend) and, on start, publish the host shared
 * memory segment signature/length and driver info.
 */
void
bge_ape_driver_state_change(struct bge_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BGE_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		else {
			/* Otherwise bump it for this load. */
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bge(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}
954:
/*
 * Read one byte from NVRAM at 'addr' into *dest using the NVRAM
 * auto-access interface.  Returns 0 on success, 1 on arbitration
 * or read timeout.
 */
static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return 1;

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	/* Issue a word-aligned read and poll for completion. */
	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	/* Byte-swap the word, then extract the byte 'addr' selects. */
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);

	return 0;
}
1003:
1004: /*
1005: * Read a sequence of bytes from NVRAM.
1006: */
1007: static int
1.170 msaitoh 1008: bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
1.151 cegger 1009: {
1.203 msaitoh 1010: int error = 0, i;
1.170 msaitoh 1011: uint8_t byte = 0;
1.151 cegger 1012:
1013: if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
1.170 msaitoh 1014: return 1;
1.151 cegger 1015:
1016: for (i = 0; i < cnt; i++) {
1.203 msaitoh 1017: error = bge_nvram_getbyte(sc, off + i, &byte);
1018: if (error)
1.151 cegger 1019: break;
1020: *(dest + i) = byte;
1021: }
1022:
1.362 skrll 1023: return error ? 1 : 0;
1.151 cegger 1024: }
1025:
/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.  Returns 0 on success, 1 on timeout.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	/* Extract the byte that 'addr' selects within the 32-bit word. */
	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return 0;
}
1071:
1072: /*
1073: * Read a sequence of bytes from the EEPROM.
1074: */
1.104 thorpej 1075: static int
1.126 christos 1076: bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
1.1 fvdl 1077: {
1.203 msaitoh 1078: int error = 0, i;
1.170 msaitoh 1079: uint8_t byte = 0;
1.126 christos 1080: char *dest = destv;
1.1 fvdl 1081:
1082: for (i = 0; i < cnt; i++) {
1.203 msaitoh 1083: error = bge_eeprom_getbyte(sc, off + i, &byte);
1084: if (error)
1.1 fvdl 1085: break;
1086: *(dest + i) = byte;
1087: }
1088:
1.362 skrll 1089: return error ? 1 : 0;
1.1 fvdl 1090: }
1091:
/*
 * MII read callback: read PHY register 'reg' on 'phy' through the
 * MI communication register into *val.  Returns 0 on success, -1
 * if the APE lock cannot be taken, ETIMEDOUT or EIO on failure.
 */
static int
bge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct bge_softc * const sc = device_private(dev);
	uint32_t data;
	uint32_t autopoll;
	int rv = 0;
	int i;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return -1;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	/* Start the read and poll until the BUSY bit clears. */
	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		data = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(data & BGE_MICOMM_BUSY)) {
			/* Re-read after a short settle delay. */
			DELAY(5);
			data = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
		rv = ETIMEDOUT;
	} else if ((data & BGE_MICOMM_READFAIL) != 0) {
		/* XXX This error occurs on some devices while attaching. */
		aprint_debug_dev(sc->bge_dev, "PHY read I/O error\n");
		rv = EIO;
	} else
		*val = data & BGE_MICOMM_DATA;

	/* Restore autopolling if we had disabled it above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	return rv;
}
1145:
/*
 * MII write callback: write 'val' to PHY register 'reg' on 'phy'
 * through the MI communication register.  Returns 0 on success, -1
 * if the APE lock cannot be taken, ETIMEDOUT or EIO on failure.
 */
static int
bge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct bge_softc * const sc = device_private(dev);
	uint32_t data, autopoll;
	int rv = 0;
	int i;

	/* On the BCM5906, skip GTCR/AUXCTL writes and report success. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == MII_GTCR || reg == BRGPHY_MII_AUXCTL))
		return 0;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return -1;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	/* Start the write and poll until the BUSY bit clears. */
	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		data = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(data & BGE_MICOMM_BUSY)) {
			/* Re-read after a short settle delay. */
			delay(5);
			data = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
		rv = ETIMEDOUT;
	} else if ((data & BGE_MICOMM_READFAIL) != 0) {
		aprint_error_dev(sc->bge_dev, "PHY write I/O error\n");
		rv = EIO;
	}

	/* Restore autopolling if we had disabled it above. */
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	return rv;
}
1200:
/*
 * MII status-change callback: propagate the negotiated media
 * settings (port mode, duplex, pause) from the PHY into the MAC,
 * TX and RX mode registers.
 */
static void
bge_miibus_statchg(struct ifnet *ifp)
{
	struct bge_softc * const sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	uint32_t mac_mode, rx_mode, tx_mode;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	/* Track the link state in the BGE_STS_LINK status bit. */
	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
		BGE_STS_SETBIT(sc, BGE_STS_LINK);
	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
		BGE_STS_CLRBIT(sc, BGE_STS_LINK);

	/* Nothing more to program without link. */
	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
		return;

	/* Set the port mode (MII/GMII) to match the link speed. */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	/* Pause is only enabled at full duplex, as negotiated. */
	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if ((mii->mii_media_active & IFM_FDX) != 0) {
		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}
1253:
/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 * The hardware registers are not touched here; bge_pending_rxintr_change
 * tells the rest of the driver to apply the new values later.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc * const sc = ifp->if_softc;

	/*
	 * For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
	mutex_enter(sc->sc_core_lock);
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	mutex_exit(sc->sc_core_lock);
}
1276:
1277:
1278: /*
1279: * Update Rx thresholds of all bge devices
1280: */
1.104 thorpej 1281: static void
1.63 jonathan 1282: bge_update_all_threshes(int lvl)
1283: {
1.360 skrll 1284: const char * const namebuf = "bge";
1285: const size_t namelen = strlen(namebuf);
1.63 jonathan 1286: struct ifnet *ifp;
1287:
1288: if (lvl < 0)
1289: lvl = 0;
1.170 msaitoh 1290: else if (lvl >= NBGE_RX_THRESH)
1.63 jonathan 1291: lvl = NBGE_RX_THRESH - 1;
1.87 perry 1292:
1.63 jonathan 1293: /*
1294: * Now search all the interfaces for this name/number
1295: */
1.360 skrll 1296: int s = pserialize_read_enter();
1.296 ozaki-r 1297: IFNET_READER_FOREACH(ifp) {
1.67 jonathan 1298: if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
1.367 skrll 1299: continue;
1.63 jonathan 1300: /* We got a match: update if doing auto-threshold-tuning */
1301: if (bge_auto_thresh)
1.67 jonathan 1302: bge_set_thresh(ifp, lvl);
1.63 jonathan 1303: }
1.296 ozaki-r 1304: pserialize_read_exit(s);
1.63 jonathan 1305: }
1306:
/*
 * Handle events that have triggered interrupts.
 * Currently a no-op placeholder.
 */
static void
bge_handle_events(struct bge_softc *sc)
{
	/* Nothing to do yet. */
}
1316:
1317: /*
1318: * Memory management for jumbo frames.
1319: */
1320:
/*
 * Allocate one large DMA-able chunk (BGE_JMEM bytes), map and load
 * it, then carve it into BGE_JSLOTS buffers of BGE_JLEN bytes, each
 * tracked by a bge_jpool_entry on the free list.  Returns 0 or
 * ENOBUFS; 'state' records how far setup got so the error path can
 * unwind exactly what was acquired.
 */
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &sc->bge_cdata.bge_rx_jumbo_seg, 1, &rseg, BUS_DMA_WAITOK)) {
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_cdata.bge_rx_jumbo_seg,
	    rseg, BGE_JMEM, (void **)&kva, BUS_DMA_WAITOK)) {
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_jumbo_map)) {
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_WAITOK)) {
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = kmem_alloc(sizeof(*entry), KM_SLEEP);
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind in reverse order of acquisition. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->bge_dmatag,
			    &sc->bge_cdata.bge_rx_jumbo_seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}
1407:
/*
 * Release everything bge_alloc_jumbo_mem() acquired, in reverse
 * order.  All jumbo buffers must be back on the free list (no
 * buffers may still be in use).
 */
static void
bge_free_jumbo_mem(struct bge_softc *sc)
{
	struct bge_jpool_entry *entry, *tmp;

	KASSERT(SLIST_EMPTY(&sc->bge_jinuse_listhead));

	SLIST_FOREACH_SAFE(entry, &sc->bge_jfree_listhead, jpool_entries, tmp) {
		kmem_free(entry, sizeof(*entry));
	}

	bus_dmamap_unload(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map);

	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map);

	bus_dmamem_unmap(sc->bge_dmatag, sc->bge_cdata.bge_jumbo_buf, BGE_JMEM);

	bus_dmamem_free(sc->bge_dmatag, &sc->bge_cdata.bge_rx_jumbo_seg, 1);
}
1427:
1.1 fvdl 1428: /*
1429: * Allocate a jumbo buffer.
1430: */
1.104 thorpej 1431: static void *
1432: bge_jalloc(struct bge_softc *sc)
1.1 fvdl 1433: {
1.330 msaitoh 1434: struct bge_jpool_entry *entry;
1.1 fvdl 1435:
1436: entry = SLIST_FIRST(&sc->bge_jfree_listhead);
1437:
1438: if (entry == NULL) {
1.138 joerg 1439: aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
1.170 msaitoh 1440: return NULL;
1.1 fvdl 1441: }
1442:
1443: SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
1444: SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
1.362 skrll 1445: return sc->bge_cdata.bge_jslots[entry->slot];
1.1 fvdl 1446: }
1447:
/*
 * Release a jumbo buffer.  Used as the mbuf external-storage free
 * callback (see MEXTADD in bge_newbuf_jumbo): moves the pool entry
 * back to the free list and returns the mbuf to the mbuf cache.
 */
static void
bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc * const sc = arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	int i = ((char *)buf - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if (i < 0 || i >= BGE_JSLOTS)
		panic("bge_jfree: asked to free buffer that we don't manage!");

	/* Recycle an in-use entry to record the freed slot. */
	mutex_enter(sc->sc_core_lock);
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
	mutex_exit(sc->sc_core_lock);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
}
1478:
1479:
/*
 * Initialize a standard receive ring descriptor: allocate a cluster
 * mbuf, DMA-load it into slot 'i''s map and fill in the descriptor.
 * Returns 0 on success or ENOBUFS if an allocation/load fails.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i)
{
	const bus_dmamap_t dmamap = sc->bge_cdata.bge_rx_std_map[i];
	struct mbuf *m;

	/* Allocate an mbuf header with a cluster body. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Align the payload unless the chip has the RX alignment bug. */
	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
		m_adj(m, ETHER_ALIGN);
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT)) {
		m_freem(m);
		return ENOBUFS;
	}
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	sc->bge_cdata.bge_rx_std_chain[i] = m;

	/* Sync the descriptor slot before rewriting it. */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
	    i * sizeof(struct bge_rx_bd),
	    sizeof(struct bge_rx_bd),
	    BUS_DMASYNC_POSTWRITE);

	struct bge_rx_bd * const r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m->m_len;
	r->bge_idx = i;

	/* Push the updated descriptor out for the device to see. */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
	    i * sizeof(struct bge_rx_bd),
	    sizeof(struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE);

	sc->bge_std_cnt++;

	return 0;
}
1533:
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 * If 'm' is non-NULL, its existing jumbo storage is recycled
 * instead of allocating a new buffer.  Returns 0 or ENOBUFS.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	void *buf = NULL;

	if (m == NULL) {

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			aprint_error_dev(sc->bge_dev,
			    "jumbo allocation failed -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		/* Reuse the caller's mbuf; reset it to its full size. */
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	/* Align the payload unless the chip has the RX alignment bug. */
	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
	    BGE_JLEN, BUS_DMASYNC_PREREAD);

	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END | BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	/* Push the updated descriptor out for the device to see. */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
	    i * sizeof(struct bge_rx_bd),
	    sizeof(struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return 0;
}
1593:
1.104 thorpej 1594: static int
1595: bge_init_rx_ring_std(struct bge_softc *sc)
1.1 fvdl 1596: {
1.376 skrll 1597: bus_dmamap_t dmamap;
1598: int error = 0;
1599: u_int i;
1.1 fvdl 1600:
1.261 msaitoh 1601: if (sc->bge_flags & BGEF_RXRING_VALID)
1.1 fvdl 1602: return 0;
1603:
1.376 skrll 1604: for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1605: error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
1606: MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dmamap);
1607: if (error)
1608: goto uncreate;
1609:
1610: sc->bge_cdata.bge_rx_std_map[i] = dmamap;
1611: memset(&sc->bge_rdata->bge_rx_std_ring[i], 0,
1612: sizeof(struct bge_rx_bd));
1.1 fvdl 1613: }
1614:
1615: sc->bge_std = i - 1;
1.376 skrll 1616: sc->bge_std_cnt = 0;
1617: bge_fill_rx_ring_std(sc);
1.1 fvdl 1618:
1.261 msaitoh 1619: sc->bge_flags |= BGEF_RXRING_VALID;
1.1 fvdl 1620:
1.170 msaitoh 1621: return 0;
1.376 skrll 1622:
1623: uncreate:
1624: while (--i) {
1625: bus_dmamap_destroy(sc->bge_dmatag,
1626: sc->bge_cdata.bge_rx_std_map[i]);
1627: }
1628: return error;
1.1 fvdl 1629: }
1630:
1.104 thorpej 1631: static void
1.376 skrll 1632: bge_fill_rx_ring_std(struct bge_softc *sc)
1633: {
1634: int i = sc->bge_std;
1635: bool post = false;
1636:
1637: while (sc->bge_std_cnt < BGE_STD_RX_RING_CNT) {
1638: BGE_INC(i, BGE_STD_RX_RING_CNT);
1639:
1640: if (bge_newbuf_std(sc, i) != 0)
1641: break;
1642:
1643: sc->bge_std = i;
1644: post = true;
1645: }
1646:
1647: if (post)
1648: bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1649: }
1650:
1651:
1652: static void
1653: bge_free_rx_ring_std(struct bge_softc *sc)
1.1 fvdl 1654: {
1655:
1.261 msaitoh 1656: if (!(sc->bge_flags & BGEF_RXRING_VALID))
1.1 fvdl 1657: return;
1658:
1.376 skrll 1659: for (u_int i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1660: const bus_dmamap_t dmap = sc->bge_cdata.bge_rx_std_map[i];
1661: struct mbuf * const m = sc->bge_cdata.bge_rx_std_chain[i];
1662: if (m != NULL) {
1663: bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1664: dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1665: bus_dmamap_unload(sc->bge_dmatag, dmap);
1666: m_freem(m);
1.1 fvdl 1667: sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1668: }
1.376 skrll 1669: bus_dmamap_destroy(sc->bge_dmatag,
1670: sc->bge_cdata.bge_rx_std_map[i]);
1671: sc->bge_cdata.bge_rx_std_map[i] = NULL;
1.1 fvdl 1672: memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
1673: sizeof(struct bge_rx_bd));
1674: }
1675:
1.261 msaitoh 1676: sc->bge_flags &= ~BGEF_RXRING_VALID;
1.1 fvdl 1677: }
1678:
1.104 thorpej 1679: static int
1680: bge_init_rx_ring_jumbo(struct bge_softc *sc)
1.1 fvdl 1681: {
1682: int i;
1.34 jonathan 1683: volatile struct bge_rcb *rcb;
1.1 fvdl 1684:
1.261 msaitoh 1685: if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID)
1.59 martin 1686: return 0;
1687:
1.1 fvdl 1688: for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1689: if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1.170 msaitoh 1690: return ENOBUFS;
1.205 msaitoh 1691: }
1.1 fvdl 1692:
1693: sc->bge_jumbo = i - 1;
1.261 msaitoh 1694: sc->bge_flags |= BGEF_JUMBO_RXRING_VALID;
1.1 fvdl 1695:
1696: rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1.34 jonathan 1697: rcb->bge_maxlen_flags = 0;
1698: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1.1 fvdl 1699:
1.151 cegger 1700: bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1.1 fvdl 1701:
1.170 msaitoh 1702: return 0;
1.1 fvdl 1703: }
1704:
1.104 thorpej 1705: static void
1706: bge_free_rx_ring_jumbo(struct bge_softc *sc)
1.1 fvdl 1707: {
1708: int i;
1709:
1.261 msaitoh 1710: if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID))
1.1 fvdl 1711: return;
1712:
1713: for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1714: if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1715: m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1716: sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1717: }
1718: memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
1719: sizeof(struct bge_rx_bd));
1720: }
1721:
1.261 msaitoh 1722: sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID;
1.1 fvdl 1723: }
1724:
1.104 thorpej 1725: static void
1.320 bouyer 1726: bge_free_tx_ring(struct bge_softc *sc, bool disable)
1.1 fvdl 1727: {
1.204 msaitoh 1728: int i;
1.1 fvdl 1729: struct txdmamap_pool_entry *dma;
1730:
1.261 msaitoh 1731: if (!(sc->bge_flags & BGEF_TXRING_VALID))
1.1 fvdl 1732: return;
1733:
1734: for (i = 0; i < BGE_TX_RING_CNT; i++) {
1735: if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1736: m_freem(sc->bge_cdata.bge_tx_chain[i]);
1737: sc->bge_cdata.bge_tx_chain[i] = NULL;
1738: SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1739: link);
1740: sc->txdma[i] = 0;
1741: }
1742: memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
1743: sizeof(struct bge_tx_bd));
1744: }
1745:
1.320 bouyer 1746: if (disable) {
1747: while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1748: SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1749: bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1750: if (sc->bge_dma64) {
1751: bus_dmamap_destroy(sc->bge_dmatag32,
1752: dma->dmamap32);
1753: }
1.366 skrll 1754: kmem_free(dma, sizeof(*dma));
1.320 bouyer 1755: }
1756: SLIST_INIT(&sc->txdma_list);
1.1 fvdl 1757: }
1758:
1.261 msaitoh 1759: sc->bge_flags &= ~BGEF_TXRING_VALID;
1.1 fvdl 1760: }
1761:
1.104 thorpej 1762: static int
1763: bge_init_tx_ring(struct bge_softc *sc)
1.1 fvdl 1764: {
1.354 skrll 1765: struct ifnet * const ifp = &sc->ethercom.ec_if;
1.1 fvdl 1766: int i;
1.317 bouyer 1767: bus_dmamap_t dmamap, dmamap32;
1.258 msaitoh 1768: bus_size_t maxsegsz;
1.1 fvdl 1769: struct txdmamap_pool_entry *dma;
1770:
1.261 msaitoh 1771: if (sc->bge_flags & BGEF_TXRING_VALID)
1.1 fvdl 1772: return 0;
1773:
1774: sc->bge_txcnt = 0;
1775: sc->bge_tx_saved_considx = 0;
1.94 jonathan 1776:
1777: /* Initialize transmit producer index for host-memory send ring. */
1778: sc->bge_tx_prodidx = 0;
1.151 cegger 1779: bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1.158 msaitoh 1780: /* 5700 b2 errata */
1781: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1.151 cegger 1782: bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1.25 jonathan 1783:
1.158 msaitoh 1784: /* NIC-memory send ring not used; initialize to zero. */
1.151 cegger 1785: bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1.158 msaitoh 1786: /* 5700 b2 errata */
1787: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1.151 cegger 1788: bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1.1 fvdl 1789:
1.258 msaitoh 1790: /* Limit DMA segment size for some chips */
1791: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) &&
1792: (ifp->if_mtu <= ETHERMTU))
1793: maxsegsz = 2048;
1794: else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
1795: maxsegsz = 4096;
1796: else
1797: maxsegsz = ETHER_MAX_LEN_JUMBO;
1.317 bouyer 1798:
1.320 bouyer 1799: if (SLIST_FIRST(&sc->txdma_list) != NULL)
1800: goto alloc_done;
1801:
1.246 msaitoh 1802: for (i = 0; i < BGE_TX_RING_CNT; i++) {
1.95 jonathan 1803: if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
1.317 bouyer 1804: BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1.1 fvdl 1805: &dmamap))
1.170 msaitoh 1806: return ENOBUFS;
1.1 fvdl 1807: if (dmamap == NULL)
1808: panic("dmamap NULL in bge_init_tx_ring");
1.317 bouyer 1809: if (sc->bge_dma64) {
1810: if (bus_dmamap_create(sc->bge_dmatag32, BGE_TXDMA_MAX,
1811: BGE_NTXSEG, maxsegsz, 0,
1812: BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1813: &dmamap32)) {
1814: bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1815: return ENOBUFS;
1816: }
1817: if (dmamap32 == NULL)
1818: panic("dmamap32 NULL in bge_init_tx_ring");
1819: } else
1820: dmamap32 = dmamap;
1.366 skrll 1821: dma = kmem_alloc(sizeof(*dma), KM_NOSLEEP);
1.1 fvdl 1822: if (dma == NULL) {
1.138 joerg 1823: aprint_error_dev(sc->bge_dev,
1824: "can't alloc txdmamap_pool_entry\n");
1.1 fvdl 1825: bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1.317 bouyer 1826: if (sc->bge_dma64)
1827: bus_dmamap_destroy(sc->bge_dmatag32, dmamap32);
1.170 msaitoh 1828: return ENOMEM;
1.1 fvdl 1829: }
1830: dma->dmamap = dmamap;
1.317 bouyer 1831: dma->dmamap32 = dmamap32;
1.1 fvdl 1832: SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1833: }
1.320 bouyer 1834: alloc_done:
1.261 msaitoh 1835: sc->bge_flags |= BGEF_TXRING_VALID;
1.1 fvdl 1836:
1.170 msaitoh 1837: return 0;
1.1 fvdl 1838: }
1839:
1.104 thorpej 1840: static void
1841: bge_setmulti(struct bge_softc *sc)
1.1 fvdl 1842: {
1.354 skrll 1843: struct ethercom * const ec = &sc->ethercom;
1.1 fvdl 1844: struct ether_multi *enm;
1.330 msaitoh 1845: struct ether_multistep step;
1.170 msaitoh 1846: uint32_t hashes[4] = { 0, 0, 0, 0 };
1847: uint32_t h;
1.1 fvdl 1848: int i;
1849:
1.375 skrll 1850: KASSERT(mutex_owned(sc->sc_core_lock));
1851: if (sc->bge_if_flags & IFF_PROMISC)
1.13 thorpej 1852: goto allmulti;
1.1 fvdl 1853:
1854: /* Now program new ones. */
1.333 msaitoh 1855: ETHER_LOCK(ec);
1.332 msaitoh 1856: ETHER_FIRST_MULTI(step, ec, enm);
1.1 fvdl 1857: while (enm != NULL) {
1.13 thorpej 1858: if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1859: /*
1860: * We must listen to a range of multicast addresses.
1861: * For now, just accept all multicasts, rather than
1862: * trying to set only those filter bits needed to match
1863: * the range. (At this time, the only use of address
1864: * ranges is for IP multicast routing, for which the
1865: * range is big enough to require all bits set.)
1866: */
1.333 msaitoh 1867: ETHER_UNLOCK(ec);
1.13 thorpej 1868: goto allmulti;
1869: }
1870:
1.158 msaitoh 1871: h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1.1 fvdl 1872:
1.158 msaitoh 1873: /* Just want the 7 least-significant bits. */
1874: h &= 0x7f;
1.1 fvdl 1875:
1.336 msaitoh 1876: hashes[(h & 0x60) >> 5] |= 1U << (h & 0x1F);
1.158 msaitoh 1877: ETHER_NEXT_MULTI(step, enm);
1.25 jonathan 1878: }
1.375 skrll 1879: ec->ec_flags &= ~ETHER_F_ALLMULTI;
1.333 msaitoh 1880: ETHER_UNLOCK(ec);
1.25 jonathan 1881:
1.158 msaitoh 1882: goto setit;
1.1 fvdl 1883:
1.158 msaitoh 1884: allmulti:
1.375 skrll 1885: ETHER_LOCK(ec);
1886: ec->ec_flags |= ETHER_F_ALLMULTI;
1887: ETHER_UNLOCK(ec);
1.158 msaitoh 1888: hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
1.133 markd 1889:
1.158 msaitoh 1890: setit:
1891: for (i = 0; i < 4; i++)
1892: CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1893: }
1.133 markd 1894:
1.177 msaitoh 1895: static void
1.178 msaitoh 1896: bge_sig_pre_reset(struct bge_softc *sc, int type)
1.177 msaitoh 1897: {
1.208 msaitoh 1898:
1.177 msaitoh 1899: /*
1900: * Some chips don't like this so only do this if ASF is enabled
1901: */
1902: if (sc->bge_asf_mode)
1.216 msaitoh 1903: bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1.1 fvdl 1904:
1.177 msaitoh 1905: if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1906: switch (type) {
1907: case BGE_RESET_START:
1.216 msaitoh 1908: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1909: BGE_FW_DRV_STATE_START);
1910: break;
1911: case BGE_RESET_SHUTDOWN:
1912: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1913: BGE_FW_DRV_STATE_UNLOAD);
1.177 msaitoh 1914: break;
1.216 msaitoh 1915: case BGE_RESET_SUSPEND:
1916: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1917: BGE_FW_DRV_STATE_SUSPEND);
1.177 msaitoh 1918: break;
1919: }
1920: }
1.216 msaitoh 1921:
1922: if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1923: bge_ape_driver_state_change(sc, type);
1.177 msaitoh 1924: }
1925:
/*
 * Signal the ASF/APE management firmware that a reset of the given
 * type has completed.
 */
static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_START_DONE);
			/* START DONE */
			break;
		case BGE_RESET_SHUTDOWN:
			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
			    BGE_FW_DRV_STATE_UNLOAD_DONE);
			break;
		}
	}

	/* APE is told about shutdown only after the reset is done. */
	if (type == BGE_RESET_SHUTDOWN)
		bge_ape_driver_state_change(sc, type);
}
1947:
1948: static void
1.178 msaitoh 1949: bge_sig_legacy(struct bge_softc *sc, int type)
1.177 msaitoh 1950: {
1.178 msaitoh 1951:
1.177 msaitoh 1952: if (sc->bge_asf_mode) {
1953: switch (type) {
1954: case BGE_RESET_START:
1.216 msaitoh 1955: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1956: BGE_FW_DRV_STATE_START);
1.177 msaitoh 1957: break;
1.216 msaitoh 1958: case BGE_RESET_SHUTDOWN:
1959: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1960: BGE_FW_DRV_STATE_UNLOAD);
1.177 msaitoh 1961: break;
1962: }
1963: }
1964: }
1965:
1966: static void
1.216 msaitoh 1967: bge_wait_for_event_ack(struct bge_softc *sc)
1968: {
1969: int i;
1970:
1971: /* wait up to 2500usec */
1972: for (i = 0; i < 250; i++) {
1973: if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1974: BGE_RX_CPU_DRV_EVENT))
1975: break;
1976: DELAY(10);
1977: }
1978: }
1979:
/*
 * Pause the ASF firmware before a reset: wait for any outstanding
 * event to be acknowledged, issue the PAUSE command, raise the driver
 * event bit, and wait for the firmware to acknowledge it.
 */
static void
bge_stop_fw(struct bge_softc *sc)
{

	if (sc->bge_asf_mode) {
		bge_wait_for_event_ack(sc);

		bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
		CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
		    CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);

		bge_wait_for_event_ack(sc);
	}
}
1.1 fvdl 1994:
1.180 msaitoh 1995: static int
1996: bge_poll_fw(struct bge_softc *sc)
1997: {
1998: uint32_t val;
1999: int i;
2000:
2001: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2002: for (i = 0; i < BGE_TIMEOUT; i++) {
2003: val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2004: if (val & BGE_VCPU_STATUS_INIT_DONE)
2005: break;
2006: DELAY(100);
2007: }
2008: if (i >= BGE_TIMEOUT) {
2009: aprint_error_dev(sc->bge_dev, "reset timed out\n");
2010: return -1;
2011: }
1.274 msaitoh 2012: } else {
1.180 msaitoh 2013: /*
2014: * Poll the value location we just wrote until
2015: * we see the 1's complement of the magic number.
2016: * This indicates that the firmware initialization
2017: * is complete.
2018: * XXX 1000ms for Flash and 10000ms for SEEPROM.
2019: */
2020: for (i = 0; i < BGE_TIMEOUT; i++) {
1.216 msaitoh 2021: val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
2022: if (val == ~BGE_SRAM_FW_MB_MAGIC)
1.180 msaitoh 2023: break;
2024: DELAY(10);
2025: }
2026:
1.274 msaitoh 2027: if ((i >= BGE_TIMEOUT)
2028: && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) {
1.180 msaitoh 2029: aprint_error_dev(sc->bge_dev,
2030: "firmware handshake timed out, val = %x\n", val);
2031: return -1;
2032: }
2033: }
2034:
1.214 msaitoh 2035: if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
2036: /* tg3 says we have to wait extra time */
2037: delay(10 * 1000);
2038: }
2039:
1.180 msaitoh 2040: return 0;
2041: }
2042:
1.216 msaitoh 2043: int
2044: bge_phy_addr(struct bge_softc *sc)
2045: {
2046: struct pci_attach_args *pa = &(sc->bge_pa);
2047: int phy_addr = 1;
2048:
2049: /*
2050: * PHY address mapping for various devices.
2051: *
1.330 msaitoh 2052: * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
1.216 msaitoh 2053: * ---------+-------+-------+-------+-------+
1.330 msaitoh 2054: * BCM57XX | 1 | X | X | X |
2055: * BCM5704 | 1 | X | 1 | X |
2056: * BCM5717 | 1 | 8 | 2 | 9 |
2057: * BCM5719 | 1 | 8 | 2 | 9 |
2058: * BCM5720 | 1 | 8 | 2 | 9 |
1.216 msaitoh 2059: *
1.330 msaitoh 2060: * | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
1.216 msaitoh 2061: * ---------+-------+-------+-------+-------+
1.330 msaitoh 2062: * BCM57XX | X | X | X | X |
2063: * BCM5704 | X | X | X | X |
2064: * BCM5717 | X | X | X | X |
2065: * BCM5719 | 3 | 10 | 4 | 11 |
2066: * BCM5720 | X | X | X | X |
1.216 msaitoh 2067: *
2068: * Other addresses may respond but they are not
2069: * IEEE compliant PHYs and should be ignored.
2070: */
2071: switch (BGE_ASICREV(sc->bge_chipid)) {
2072: case BGE_ASICREV_BCM5717:
2073: case BGE_ASICREV_BCM5719:
2074: case BGE_ASICREV_BCM5720:
2075: phy_addr = pa->pa_function;
1.234 msaitoh 2076: if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
1.216 msaitoh 2077: phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
2078: BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
2079: } else {
2080: phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2081: BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
2082: }
2083: }
2084:
2085: return phy_addr;
2086: }
2087:
1.158 msaitoh 2088: /*
2089: * Do endian, PCI and DMA initialization. Also check the on-board ROM
2090: * self-test results.
2091: */
2092: static int
2093: bge_chipinit(struct bge_softc *sc)
2094: {
1.288 msaitoh 2095: uint32_t dma_rw_ctl, misc_ctl, mode_ctl, reg;
1.178 msaitoh 2096: int i;
1.1 fvdl 2097:
1.158 msaitoh 2098: /* Set endianness before we access any non-PCI registers. */
1.288 msaitoh 2099: misc_ctl = BGE_INIT;
2100: if (sc->bge_flags & BGEF_TAGGED_STATUS)
2101: misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1.158 msaitoh 2102: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
1.288 msaitoh 2103: misc_ctl);
1.1 fvdl 2104:
1.158 msaitoh 2105: /*
2106: * Clear the MAC statistics block in the NIC's
2107: * internal memory.
2108: */
2109: for (i = BGE_STATS_BLOCK;
1.170 msaitoh 2110: i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1.158 msaitoh 2111: BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
1.1 fvdl 2112:
1.158 msaitoh 2113: for (i = BGE_STATUS_BLOCK;
1.170 msaitoh 2114: i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1.158 msaitoh 2115: BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
1.1 fvdl 2116:
1.214 msaitoh 2117: /* 5717 workaround from tg3 */
2118: if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2119: /* Save */
2120: mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2121:
2122: /* Temporary modify MODE_CTL to control TLP */
2123: reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2124: CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1);
2125:
2126: /* Control TLP */
2127: reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2128: BGE_TLP_PHYCTL1);
2129: CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1,
2130: reg | BGE_TLP_PHYCTL1_EN_L1PLLPD);
2131:
2132: /* Restore */
2133: CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2134: }
1.330 msaitoh 2135:
1.257 msaitoh 2136: if (BGE_IS_57765_FAMILY(sc)) {
1.214 msaitoh 2137: if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
2138: /* Save */
2139: mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2140:
2141: /* Temporary modify MODE_CTL to control TLP */
2142: reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2143: CSR_WRITE_4(sc, BGE_MODE_CTL,
2144: reg | BGE_MODECTL_PCIE_TLPADDR1);
1.330 msaitoh 2145:
1.214 msaitoh 2146: /* Control TLP */
2147: reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2148: BGE_TLP_PHYCTL5);
2149: CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5,
2150: reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ);
2151:
2152: /* Restore */
2153: CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2154: }
2155: if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
1.305 msaitoh 2156: /*
2157: * For the 57766 and non Ax versions of 57765, bootcode
2158: * needs to setup the PCIE Fast Training Sequence (FTS)
2159: * value to prevent transmit hangs.
2160: */
1.214 msaitoh 2161: reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
2162: CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
2163: reg | BGE_CPMU_PADRNG_CTL_RDIV2);
2164:
2165: /* Save */
2166: mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2167:
2168: /* Temporary modify MODE_CTL to control TLP */
2169: reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2170: CSR_WRITE_4(sc, BGE_MODE_CTL,
2171: reg | BGE_MODECTL_PCIE_TLPADDR0);
2172:
2173: /* Control TLP */
2174: reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2175: BGE_TLP_FTSMAX);
2176: reg &= ~BGE_TLP_FTSMAX_MSK;
2177: CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX,
2178: reg | BGE_TLP_FTSMAX_VAL);
2179:
2180: /* Restore */
2181: CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2182: }
2183:
2184: reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
2185: reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
2186: reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
2187: CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
2188: }
2189:
1.158 msaitoh 2190: /* Set up the PCI DMA control register. */
1.166 msaitoh 2191: dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
1.261 msaitoh 2192: if (sc->bge_flags & BGEF_PCIE) {
1.166 msaitoh 2193: /* Read watermark not used, 128 bytes for write. */
1.158 msaitoh 2194: DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
2195: device_xname(sc->bge_dev)));
1.253 msaitoh 2196: if (sc->bge_mps >= 256)
2197: dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
2198: else
2199: dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1.261 msaitoh 2200: } else if (sc->bge_flags & BGEF_PCIX) {
1.330 msaitoh 2201: DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n",
1.158 msaitoh 2202: device_xname(sc->bge_dev)));
2203: /* PCI-X bus */
1.172 msaitoh 2204: if (BGE_IS_5714_FAMILY(sc)) {
2205: /* 256 bytes for read and write. */
1.204 msaitoh 2206: dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
2207: BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1.172 msaitoh 2208:
2209: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
2210: dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
2211: else
2212: dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1.276 msaitoh 2213: } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
2214: /*
2215: * In the BCM5703, the DMA read watermark should
2216: * be set to less than or equal to the maximum
2217: * memory read byte count of the PCI-X command
2218: * register.
2219: */
2220: dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
2221: BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1.172 msaitoh 2222: } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2223: /* 1536 bytes for read, 384 bytes for write. */
1.204 msaitoh 2224: dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
2225: BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1.172 msaitoh 2226: } else {
2227: /* 384 bytes for read and write. */
1.204 msaitoh 2228: dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
2229: BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1.172 msaitoh 2230: (0x0F);
2231: }
2232:
2233: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2234: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2235: uint32_t tmp;
2236:
2237: /* Set ONEDMA_ATONCE for hardware workaround. */
1.226 msaitoh 2238: tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1.172 msaitoh 2239: if (tmp == 6 || tmp == 7)
2240: dma_rw_ctl |=
2241: BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
2242:
2243: /* Set PCI-X DMA write workaround. */
2244: dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1.158 msaitoh 2245: }
2246: } else {
1.172 msaitoh 2247: /* Conventional PCI bus: 256 bytes for read and write. */
1.330 msaitoh 2248: DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
1.158 msaitoh 2249: device_xname(sc->bge_dev)));
1.204 msaitoh 2250: dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
2251: BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
2252:
1.160 msaitoh 2253: if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
2254: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
1.158 msaitoh 2255: dma_rw_ctl |= 0x0F;
2256: }
1.157 msaitoh 2257:
1.161 msaitoh 2258: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2259: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
2260: dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
2261: BGE_PCIDMARWCTL_ASRT_ALL_BE;
1.178 msaitoh 2262:
1.161 msaitoh 2263: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2264: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2265: dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
2266:
1.257 msaitoh 2267: if (BGE_IS_57765_PLUS(sc)) {
1.214 msaitoh 2268: dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
2269: if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
2270: dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
2271:
2272: /*
2273: * Enable HW workaround for controllers that misinterpret
2274: * a status tag update and leave interrupts permanently
2275: * disabled.
2276: */
1.257 msaitoh 2277: if (!BGE_IS_57765_FAMILY(sc) &&
1.327 msaitoh 2278: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2279: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
1.214 msaitoh 2280: dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
2281: }
2282:
1.177 msaitoh 2283: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
2284: dma_rw_ctl);
1.120 tsutsui 2285:
1.158 msaitoh 2286: /*
2287: * Set up general mode register.
2288: */
1.216 msaitoh 2289: mode_ctl = BGE_DMA_SWAP_OPTIONS;
1.327 msaitoh 2290: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2291: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1.216 msaitoh 2292: /* Retain Host-2-BMC settings written by APE firmware. */
2293: mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
2294: (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
2295: BGE_MODECTL_WORDSWAP_B2HRX_DATA |
2296: BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
2297: }
2298: mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
2299: BGE_MODECTL_TX_NO_PHDR_CSUM;
1.16 thorpej 2300:
1.158 msaitoh 2301: /*
1.172 msaitoh 2302: * BCM5701 B5 have a bug causing data corruption when using
2303: * 64-bit DMA reads, which can be terminated early and then
2304: * completed later as 32-bit accesses, in combination with
2305: * certain bridges.
2306: */
2307: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2308: sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1.216 msaitoh 2309: mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1.172 msaitoh 2310:
2311: /*
1.177 msaitoh 2312: * Tell the firmware the driver is running
2313: */
2314: if (sc->bge_asf_mode & ASF_STACKUP)
1.216 msaitoh 2315: mode_ctl |= BGE_MODECTL_STACKUP;
2316:
2317: CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1.177 msaitoh 2318:
2319: /*
1.158 msaitoh 2320: * Disable memory write invalidate. Apparently it is not supported
2321: * properly by these devices.
2322: */
1.172 msaitoh 2323: PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
2324: PCI_COMMAND_INVALIDATE_ENABLE);
1.16 thorpej 2325:
1.158 msaitoh 2326: #ifdef __brokenalpha__
2327: /*
2328: * Must insure that we do not cross an 8K (bytes) boundary
2329: * for DMA reads. Our highest limit is 1K bytes. This is a
2330: * restriction on some ALPHA platforms with early revision
2331: * 21174 PCI chipsets, such as the AlphaPC 164lx
2332: */
2333: PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
2334: #endif
1.16 thorpej 2335:
1.158 msaitoh 2336: /* Set the timer prescaler (always 66MHz) */
1.341 msaitoh 2337: CSR_WRITE_4_FLUSH(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1.16 thorpej 2338:
1.159 msaitoh 2339: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2340: DELAY(40); /* XXX */
2341:
2342: /* Put PHY into ready state */
1.211 msaitoh 2343: BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1.159 msaitoh 2344: DELAY(40);
2345: }
2346:
1.170 msaitoh 2347: return 0;
1.158 msaitoh 2348: }
1.16 thorpej 2349:
1.158 msaitoh 2350: static int
2351: bge_blockinit(struct bge_softc *sc)
2352: {
1.177 msaitoh 2353: volatile struct bge_rcb *rcb;
2354: bus_size_t rcb_addr;
1.354 skrll 2355: struct ifnet * const ifp = &sc->ethercom.ec_if;
1.177 msaitoh 2356: bge_hostaddr taddr;
1.327 msaitoh 2357: uint32_t dmactl, rdmareg, mimode, val;
1.222 msaitoh 2358: int i, limit;
1.16 thorpej 2359:
1.158 msaitoh 2360: /*
2361: * Initialize the memory window pointer register so that
2362: * we can access the first 32K of internal NIC RAM. This will
2363: * allow us to set up the TX send ring RCBs and the RX return
2364: * ring RCBs, plus other things which live in NIC memory.
2365: */
2366: pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
1.120 tsutsui 2367:
1.216 msaitoh 2368: if (!BGE_IS_5705_PLUS(sc)) {
1.236 msaitoh 2369: /* 57XX step 33 */
2370: /* Configure mbuf memory pool */
1.332 msaitoh 2371: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1.172 msaitoh 2372:
2373: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2374: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
2375: else
2376: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1.40 fvdl 2377:
1.236 msaitoh 2378: /* 57XX step 34 */
1.158 msaitoh 2379: /* Configure DMA resource pool */
2380: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
2381: BGE_DMA_DESCRIPTORS);
2382: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
2383: }
1.40 fvdl 2384:
1.236 msaitoh 2385: /* 5718 step 11, 57XX step 35 */
2386: /*
2387: * Configure mbuf pool watermarks. New broadcom docs strongly
2388: * recommend these.
2389: */
1.216 msaitoh 2390: if (BGE_IS_5717_PLUS(sc)) {
1.202 tsutsui 2391: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1.316 bouyer 2392: if (ifp->if_mtu > ETHERMTU) {
2393: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
2394: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
2395: } else {
2396: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
2397: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
2398: }
1.202 tsutsui 2399: } else if (BGE_IS_5705_PLUS(sc)) {
2400: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2401:
2402: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2403: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
2404: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
2405: } else {
2406: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
2407: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2408: }
1.158 msaitoh 2409: } else {
1.218 msaitoh 2410: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
2411: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1.158 msaitoh 2412: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2413: }
1.25 jonathan 2414:
1.236 msaitoh 2415: /* 57XX step 36 */
2416: /* Configure DMA resource watermarks */
1.158 msaitoh 2417: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
2418: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1.51 fvdl 2419:
1.236 msaitoh 2420: /* 5718 step 13, 57XX step 38 */
2421: /* Enable buffer manager */
1.216 msaitoh 2422: val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN;
2423: /*
2424: * Change the arbitration algorithm of TXMBUF read request to
2425: * round-robin instead of priority based for BCM5719. When
2426: * TXFIFO is almost empty, RDMA will hold its request until
2427: * TXFIFO is not almost empty.
2428: */
2429: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2430: val |= BGE_BMANMODE_NO_TX_UNDERRUN;
2431: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2432: sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2433: sc->bge_chipid == BGE_CHIPID_BCM5720_A0)
2434: val |= BGE_BMANMODE_LOMBUF_ATTN;
2435: CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1.44 hannken 2436:
1.236 msaitoh 2437: /* 57XX step 39 */
2438: /* Poll for buffer manager start indication */
1.172 msaitoh 2439: for (i = 0; i < BGE_TIMEOUT * 2; i++) {
1.216 msaitoh 2440: DELAY(10);
1.172 msaitoh 2441: if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
2442: break;
2443: }
1.51 fvdl 2444:
1.172 msaitoh 2445: if (i == BGE_TIMEOUT * 2) {
2446: aprint_error_dev(sc->bge_dev,
2447: "buffer manager failed to start\n");
2448: return ENXIO;
1.158 msaitoh 2449: }
1.51 fvdl 2450:
1.236 msaitoh 2451: /* 57XX step 40 */
2452: /* Enable flow-through queues */
1.158 msaitoh 2453: CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2454: CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1.76 cube 2455:
1.158 msaitoh 2456: /* Wait until queue initialization is complete */
1.172 msaitoh 2457: for (i = 0; i < BGE_TIMEOUT * 2; i++) {
1.158 msaitoh 2458: if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
2459: break;
2460: DELAY(10);
2461: }
1.76 cube 2462:
1.172 msaitoh 2463: if (i == BGE_TIMEOUT * 2) {
1.158 msaitoh 2464: aprint_error_dev(sc->bge_dev,
2465: "flow-through queue init failed\n");
1.170 msaitoh 2466: return ENXIO;
1.158 msaitoh 2467: }
1.92 gavan 2468:
1.222 msaitoh 2469: /*
2470: * Summary of rings supported by the controller:
2471: *
2472: * Standard Receive Producer Ring
2473: * - This ring is used to feed receive buffers for "standard"
2474: * sized frames (typically 1536 bytes) to the controller.
2475: *
2476: * Jumbo Receive Producer Ring
2477: * - This ring is used to feed receive buffers for jumbo sized
2478: * frames (i.e. anything bigger than the "standard" frames)
2479: * to the controller.
2480: *
2481: * Mini Receive Producer Ring
2482: * - This ring is used to feed receive buffers for "mini"
2483: * sized frames to the controller.
2484: * - This feature required external memory for the controller
2485: * but was never used in a production system. Should always
2486: * be disabled.
2487: *
2488: * Receive Return Ring
2489: * - After the controller has placed an incoming frame into a
2490: * receive buffer that buffer is moved into a receive return
2491: * ring. The driver is then responsible to passing the
2492: * buffer up to the stack. Many versions of the controller
2493: * support multiple RR rings.
2494: *
2495: * Send Ring
2496: * - This ring is used for outgoing frames. Many versions of
2497: * the controller support multiple send rings.
2498: */
2499:
1.236 msaitoh 2500: /* 5718 step 15, 57XX step 41 */
2501: /* Initialize the standard RX ring control block */
1.158 msaitoh 2502: rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1.172 msaitoh 2503: BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1.236 msaitoh 2504: /* 5718 step 16 */
1.257 msaitoh 2505: if (BGE_IS_57765_PLUS(sc)) {
1.222 msaitoh 2506: /*
2507: * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
2508: * Bits 15-2 : Maximum RX frame size
1.309 snj 2509: * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1.222 msaitoh 2510: * Bit 0 : Reserved
2511: */
1.202 tsutsui 2512: rcb->bge_maxlen_flags =
2513: BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1.222 msaitoh 2514: } else if (BGE_IS_5705_PLUS(sc)) {
2515: /*
2516: * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
2517: * Bits 15-2 : Reserved (should be 0)
2518: * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2519: * Bit 0 : Reserved
2520: */
1.158 msaitoh 2521: rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1.222 msaitoh 2522: } else {
2523: /*
2524: * Ring size is always XXX entries
2525: * Bits 31-16: Maximum RX frame size
2526: * Bits 15-2 : Reserved (should be 0)
2527: * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2528: * Bit 0 : Reserved
2529: */
1.158 msaitoh 2530: rcb->bge_maxlen_flags =
2531: BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1.222 msaitoh 2532: }
1.216 msaitoh 2533: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2534: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2535: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2536: rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
2537: else
2538: rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1.222 msaitoh 2539: /* Write the standard receive producer ring control block. */
1.158 msaitoh 2540: CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2541: CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2542: CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2543: CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1.119 tsutsui 2544:
1.222 msaitoh 2545: /* Reset the standard receive producer ring producer index. */
2546: bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2547:
1.236 msaitoh 2548: /* 57XX step 42 */
1.158 msaitoh 2549: /*
1.236 msaitoh 2550: * Initialize the jumbo RX ring control block
1.158 msaitoh 2551: * We set the 'ring disabled' bit in the flags
2552: * field until we're actually ready to start
2553: * using this ring (i.e. once we set the MTU
2554: * high enough to require it).
2555: */
1.166 msaitoh 2556: if (BGE_IS_JUMBO_CAPABLE(sc)) {
1.158 msaitoh 2557: rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1.172 msaitoh 2558: BGE_HOSTADDR(rcb->bge_hostaddr,
1.158 msaitoh 2559: BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1.222 msaitoh 2560: rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2561: BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1.216 msaitoh 2562: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2563: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2564: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2565: rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2566: else
2567: rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1.158 msaitoh 2568: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2569: rcb->bge_hostaddr.bge_addr_hi);
2570: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2571: rcb->bge_hostaddr.bge_addr_lo);
1.222 msaitoh 2572: /* Program the jumbo receive producer ring RCB parameters. */
1.158 msaitoh 2573: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2574: rcb->bge_maxlen_flags);
2575: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1.216 msaitoh 2576: /* Reset the jumbo receive producer ring producer index. */
2577: bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2578: }
1.149 sborrill 2579:
1.236 msaitoh 2580: /* 57XX step 43 */
1.216 msaitoh 2581: /* Disable the mini receive producer ring RCB. */
2582: if (BGE_IS_5700_FAMILY(sc)) {
1.158 msaitoh 2583: /* Set up dummy disabled mini ring RCB */
2584: rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1.222 msaitoh 2585: rcb->bge_maxlen_flags =
2586: BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1.158 msaitoh 2587: CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2588: rcb->bge_maxlen_flags);
1.216 msaitoh 2589: /* Reset the mini receive producer ring producer index. */
2590: bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1.133 markd 2591:
1.158 msaitoh 2592: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2593: offsetof(struct bge_ring_data, bge_info),
1.364 skrll 2594: sizeof(struct bge_gib),
1.331 msaitoh 2595: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1.158 msaitoh 2596: }
1.133 markd 2597:
1.206 msaitoh 2598: /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2599: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2600: if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2601: sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2602: sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2603: CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2604: (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2605: }
1.236 msaitoh 2606: /* 5718 step 14, 57XX step 44 */
1.158 msaitoh 2607: /*
1.222 msaitoh 2608: * The BD ring replenish thresholds control how often the
2609: * hardware fetches new BD's from the producer rings in host
2610: * memory. Setting the value too low on a busy system can
              2611: 	 * starve the hardware and reduce the throughput.
2612: *
1.158 msaitoh 2613: * Set the BD ring replenish thresholds. The recommended
2614: * values are 1/8th the number of descriptors allocated to
1.222 msaitoh 2615: * each ring, but since we try to avoid filling the entire
2616: * ring we set these to the minimal value of 8. This needs to
2617: * be done on several of the supported chip revisions anyway,
2618: * to work around HW bugs.
1.158 msaitoh 2619: */
1.222 msaitoh 2620: CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2621: if (BGE_IS_JUMBO_CAPABLE(sc))
2622: CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
1.157 msaitoh 2623:
1.236 msaitoh 2624: /* 5718 step 18 */
1.216 msaitoh 2625: if (BGE_IS_5717_PLUS(sc)) {
1.172 msaitoh 2626: CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2627: CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2628: }
2629:
1.236 msaitoh 2630: /* 57XX step 45 */
1.158 msaitoh 2631: /*
1.222 msaitoh 2632: * Disable all send rings by setting the 'ring disabled' bit
2633: * in the flags field of all the TX send ring control blocks,
2634: * located in NIC memory.
1.158 msaitoh 2635: */
1.222 msaitoh 2636: if (BGE_IS_5700_FAMILY(sc)) {
2637: /* 5700 to 5704 had 16 send rings. */
2638: limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1.258 msaitoh 2639: } else if (BGE_IS_5717_PLUS(sc)) {
2640: limit = BGE_TX_RINGS_5717_MAX;
1.327 msaitoh 2641: } else if (BGE_IS_57765_FAMILY(sc) ||
2642: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1.258 msaitoh 2643: limit = BGE_TX_RINGS_57765_MAX;
1.222 msaitoh 2644: } else
2645: limit = 1;
1.158 msaitoh 2646: rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1.222 msaitoh 2647: for (i = 0; i < limit; i++) {
1.158 msaitoh 2648: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2649: BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2650: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2651: rcb_addr += sizeof(struct bge_rcb);
2652: }
1.157 msaitoh 2653:
1.236 msaitoh 2654: /* 57XX step 46 and 47 */
1.222 msaitoh 2655: /* Configure send ring RCB 0 (we use only the first ring) */
1.158 msaitoh 2656: rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1.172 msaitoh 2657: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1.158 msaitoh 2658: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2659: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1.216 msaitoh 2660: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2661: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2662: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2663: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
2664: else
2665: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1.158 msaitoh 2666: BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1.222 msaitoh 2667: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2668: BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1.157 msaitoh 2669:
1.236 msaitoh 2670: /* 57XX step 48 */
1.222 msaitoh 2671: /*
2672: * Disable all receive return rings by setting the
              2673: 	 * 'ring disabled' bit in the flags field of all the receive
2674: * return ring control blocks, located in NIC memory.
2675: */
1.257 msaitoh 2676: if (BGE_IS_5717_PLUS(sc)) {
1.222 msaitoh 2677: /* Should be 17, use 16 until we get an SRAM map. */
2678: limit = 16;
2679: } else if (BGE_IS_5700_FAMILY(sc))
2680: limit = BGE_RX_RINGS_MAX;
2681: else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1.327 msaitoh 2682: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 ||
1.257 msaitoh 2683: BGE_IS_57765_FAMILY(sc))
1.222 msaitoh 2684: limit = 4;
2685: else
2686: limit = 1;
2687: /* Disable all receive return rings */
1.158 msaitoh 2688: rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1.222 msaitoh 2689: for (i = 0; i < limit; i++) {
1.158 msaitoh 2690: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2691: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2692: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1.172 msaitoh 2693: BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2694: BGE_RCB_FLAG_RING_DISABLED));
1.158 msaitoh 2695: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2696: bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1.170 msaitoh 2697: (i * (sizeof(uint64_t))), 0);
1.158 msaitoh 2698: rcb_addr += sizeof(struct bge_rcb);
2699: }
1.157 msaitoh 2700:
1.236 msaitoh 2701: /* 57XX step 49 */
1.158 msaitoh 2702: /*
1.222 msaitoh 2703: * Set up receive return ring 0. Note that the NIC address
2704: * for RX return rings is 0x0. The return rings live entirely
2705: * within the host, so the nicaddr field in the RCB isn't used.
1.158 msaitoh 2706: */
2707: rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1.172 msaitoh 2708: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1.158 msaitoh 2709: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2710: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2711: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2712: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2713: BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1.157 msaitoh 2714:
1.236 msaitoh 2715: /* 5718 step 24, 57XX step 53 */
1.158 msaitoh 2716: /* Set random backoff seed for TX */
2717: CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1.235 msaitoh 2718: (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
2719: CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
2720: CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) &
1.158 msaitoh 2721: BGE_TX_BACKOFF_SEED_MASK);
1.157 msaitoh 2722:
1.236 msaitoh 2723: /* 5718 step 26, 57XX step 55 */
1.158 msaitoh 2724: /* Set inter-packet gap */
1.216 msaitoh 2725: val = 0x2620;
1.327 msaitoh 2726: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2727: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
1.216 msaitoh 2728: val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2729: (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2730: CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1.51 fvdl 2731:
1.236 msaitoh 2732: /* 5718 step 27, 57XX step 56 */
1.158 msaitoh 2733: /*
2734: * Specify which ring to use for packets that don't match
2735: * any RX rules.
2736: */
2737: CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1.157 msaitoh 2738:
1.236 msaitoh 2739: /* 5718 step 28, 57XX step 57 */
1.158 msaitoh 2740: /*
2741: * Configure number of RX lists. One interrupt distribution
2742: * list, sixteen active lists, one bad frames class.
2743: */
2744: CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1.157 msaitoh 2745:
1.236 msaitoh 2746: /* 5718 step 29, 57XX step 58 */
1.158 msaitoh 2747: 	/* Initialize RX list placement stats mask. */
1.244 msaitoh 2748: if (BGE_IS_575X_PLUS(sc)) {
2749: val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK);
2750: val &= ~BGE_RXLPSTATCONTROL_DACK_FIX;
2751: CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val);
2752: } else
2753: CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2754:
1.236 msaitoh 2755: /* 5718 step 30, 57XX step 59 */
1.158 msaitoh 2756: CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1.157 msaitoh 2757:
1.236 msaitoh 2758: /* 5718 step 33, 57XX step 62 */
1.158 msaitoh 2759: /* Disable host coalescing until we get it set up */
2760: CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1.51 fvdl 2761:
1.236 msaitoh 2762: /* 5718 step 34, 57XX step 63 */
1.158 msaitoh 2763: /* Poll to make sure it's shut down. */
1.172 msaitoh 2764: for (i = 0; i < BGE_TIMEOUT * 2; i++) {
1.216 msaitoh 2765: DELAY(10);
1.158 msaitoh 2766: if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2767: break;
2768: }
1.151 cegger 2769:
1.172 msaitoh 2770: if (i == BGE_TIMEOUT * 2) {
1.158 msaitoh 2771: aprint_error_dev(sc->bge_dev,
2772: "host coalescing engine failed to idle\n");
1.170 msaitoh 2773: return ENXIO;
1.158 msaitoh 2774: }
1.51 fvdl 2775:
1.236 msaitoh 2776: /* 5718 step 35, 36, 37 */
1.158 msaitoh 2777: /* Set up host coalescing defaults */
2778: CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2779: CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2780: CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2781: CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1.216 msaitoh 2782: if (!(BGE_IS_5705_PLUS(sc))) {
1.158 msaitoh 2783: CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2784: CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1.51 fvdl 2785: }
1.158 msaitoh 2786: CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2787: CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1.51 fvdl 2788:
1.158 msaitoh 2789: /* Set up address of statistics block */
1.172 msaitoh 2790: if (BGE_IS_5700_FAMILY(sc)) {
2791: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1.158 msaitoh 2792: CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2793: CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2794: CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2795: CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
1.16 thorpej 2796: }
2797:
1.236 msaitoh 2798: /* 5718 step 38 */
1.158 msaitoh 2799: /* Set up address of status block */
1.172 msaitoh 2800: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1.158 msaitoh 2801: CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2802: CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2803: CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2804: sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2805: sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1.16 thorpej 2806:
1.216 msaitoh 2807: /* Set up status block size. */
2808: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
2809: sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2810: val = BGE_STATBLKSZ_FULL;
2811: bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
2812: } else {
2813: val = BGE_STATBLKSZ_32BYTE;
2814: bzero(&sc->bge_rdata->bge_status_block, 32);
2815: }
2816:
1.236 msaitoh 2817: /* 5718 step 39, 57XX step 73 */
1.158 msaitoh 2818: /* Turn on host coalescing state machine */
1.216 msaitoh 2819: CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1.7 thorpej 2820:
1.236 msaitoh 2821: /* 5718 step 40, 57XX step 74 */
1.158 msaitoh 2822: /* Turn on RX BD completion state machine and enable attentions */
2823: CSR_WRITE_4(sc, BGE_RBDC_MODE,
1.161 msaitoh 2824: BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1.7 thorpej 2825:
1.236 msaitoh 2826: /* 5718 step 41, 57XX step 75 */
1.158 msaitoh 2827: /* Turn on RX list placement state machine */
2828: CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1.51 fvdl 2829:
1.236 msaitoh 2830: /* 57XX step 76 */
1.158 msaitoh 2831: /* Turn on RX list selector state machine. */
1.216 msaitoh 2832: if (!(BGE_IS_5705_PLUS(sc)))
1.158 msaitoh 2833: CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1.51 fvdl 2834:
1.161 msaitoh 2835: val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2836: BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2837: BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2838: BGE_MACMODE_FRMHDR_DMA_ENB;
2839:
1.261 msaitoh 2840: if (sc->bge_flags & BGEF_FIBER_TBI)
1.177 msaitoh 2841: val |= BGE_PORTMODE_TBI;
1.261 msaitoh 2842: else if (sc->bge_flags & BGEF_FIBER_MII)
1.177 msaitoh 2843: val |= BGE_PORTMODE_GMII;
1.161 msaitoh 2844: else
1.177 msaitoh 2845: val |= BGE_PORTMODE_MII;
1.161 msaitoh 2846:
1.236 msaitoh 2847: /* 5718 step 42 and 43, 57XX step 77 and 78 */
1.216 msaitoh 2848: /* Allow APE to send/receive frames. */
2849: if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2850: val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2851:
1.158 msaitoh 2852: /* Turn on DMA, clear stats */
1.211 msaitoh 2853: CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
1.236 msaitoh 2854: /* 5718 step 44 */
1.211 msaitoh 2855: DELAY(40);
1.161 msaitoh 2856:
1.236 msaitoh 2857: /* 5718 step 45, 57XX step 79 */
1.158 msaitoh 2858: /* Set misc. local control, enable interrupts on attentions */
1.251 msaitoh 2859: BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1.224 msaitoh 2860: if (BGE_IS_5717_PLUS(sc)) {
2861: CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */
1.236 msaitoh 2862: /* 5718 step 46 */
1.224 msaitoh 2863: DELAY(100);
2864: }
1.80 fredb 2865:
1.236 msaitoh 2866: /* 57XX step 81 */
1.158 msaitoh 2867: /* Turn on DMA completion state machine */
1.216 msaitoh 2868: if (!(BGE_IS_5705_PLUS(sc)))
1.158 msaitoh 2869: CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1.149 sborrill 2870:
1.236 msaitoh 2871: /* 5718 step 47, 57XX step 82 */
1.203 msaitoh 2872: val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2873:
1.236 msaitoh 2874: /* 5718 step 48 */
1.216 msaitoh 2875: /* Enable host coalescing bug fix. */
1.203 msaitoh 2876: if (BGE_IS_5755_PLUS(sc))
2877: val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2878:
1.206 msaitoh 2879: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2880: val |= BGE_WDMAMODE_BURST_ALL_DATA;
2881:
1.158 msaitoh 2882: /* Turn on write DMA state machine */
1.213 msaitoh 2883: CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val);
1.236 msaitoh 2884: /* 5718 step 49 */
1.213 msaitoh 2885: DELAY(40);
1.203 msaitoh 2886:
2887: val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1.216 msaitoh 2888:
2889: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
2890: val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2891:
1.203 msaitoh 2892: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2893: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2894: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2895: val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2896: BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2897: BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1.76 cube 2898:
1.261 msaitoh 2899: if (sc->bge_flags & BGEF_PCIE)
1.204 msaitoh 2900: val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1.258 msaitoh 2901: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
2902: if (ifp->if_mtu <= ETHERMTU)
2903: val |= BGE_RDMAMODE_JMB_2K_MMRR;
2904: }
1.316 bouyer 2905: if (sc->bge_flags & BGEF_TSO) {
1.203 msaitoh 2906: val |= BGE_RDMAMODE_TSO4_ENABLE;
1.316 bouyer 2907: if (BGE_IS_5717_PLUS(sc))
2908: val |= BGE_RDMAMODE_TSO6_ENABLE;
2909: }
1.76 cube 2910:
1.327 msaitoh 2911: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2912: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1.216 msaitoh 2913: val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2914: BGE_RDMAMODE_H2BNC_VLAN_DET;
2915: /*
2916: * Allow multiple outstanding read requests from
2917: * non-LSO read DMA engine.
2918: */
2919: val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2920: }
2921:
2922: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2923: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2924: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2925: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
1.257 msaitoh 2926: BGE_IS_57765_PLUS(sc)) {
1.327 msaitoh 2927: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2928: rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2929: else
2930: rdmareg = BGE_RDMA_RSRVCTRL;
2931: dmactl = CSR_READ_4(sc, rdmareg);
1.216 msaitoh 2932: /*
2933: * Adjust tx margin to prevent TX data corruption and
2934: * fix internal FIFO overflow.
2935: */
1.327 msaitoh 2936: if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2937: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1.216 msaitoh 2938: dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2939: BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2940: BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2941: dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2942: BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2943: BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2944: }
2945: /*
2946: * Enable fix for read DMA FIFO overruns.
2947: * The fix is to limit the number of RX BDs
1.349 andvar 2948: * the hardware would fetch at a time.
1.216 msaitoh 2949: */
1.327 msaitoh 2950: CSR_WRITE_4(sc, rdmareg, dmactl |
1.216 msaitoh 2951: BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2952: }
2953:
2954: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
2955: CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2956: CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2957: BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2958: BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2959: } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2960: /*
2961: * Allow 4KB burst length reads for non-LSO frames.
2962: * Enable 512B burst length reads for buffer descriptors.
2963: */
2964: CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2965: CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2966: BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2967: BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1.327 msaitoh 2968: } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2969: CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2970: CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2971: BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2972: BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
1.216 msaitoh 2973: }
1.158 msaitoh 2974: /* Turn on read DMA state machine */
1.211 msaitoh 2975: CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val);
1.236 msaitoh 2976: /* 5718 step 52 */
1.203 msaitoh 2977: delay(40);
1.128 tron 2978:
1.327 msaitoh 2979: if (sc->bge_flags & BGEF_RDMA_BUG) {
1.320 bouyer 2980: for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2981: val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2982: if ((val & 0xFFFF) > BGE_FRAMELEN)
2983: break;
2984: if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
2985: break;
2986: }
2987: if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2988: val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2989: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2990: val |= BGE_RDMA_TX_LENGTH_WA_5719;
2991: else
2992: val |= BGE_RDMA_TX_LENGTH_WA_5720;
2993: CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2994: }
2995: }
2996:
1.236 msaitoh 2997: /* 5718 step 56, 57XX step 84 */
1.158 msaitoh 2998: /* Turn on RX data completion state machine */
2999: CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1.128 tron 3000:
1.158 msaitoh 3001: /* Turn on RX data and RX BD initiator state machine */
3002: CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1.133 markd 3003:
1.236 msaitoh 3004: /* 57XX step 85 */
1.158 msaitoh 3005: /* Turn on Mbuf cluster free state machine */
1.216 msaitoh 3006: if (!BGE_IS_5705_PLUS(sc))
1.158 msaitoh 3007: CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1.133 markd 3008:
1.236 msaitoh 3009: /* 5718 step 57, 57XX step 86 */
1.158 msaitoh 3010: /* Turn on send data completion state machine */
1.172 msaitoh 3011: val = BGE_SDCMODE_ENABLE;
3012: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
3013: val |= BGE_SDCMODE_CDELAY;
3014: CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1.106 jonathan 3015:
1.236 msaitoh 3016: /* 5718 step 58 */
1.225 msaitoh 3017: /* Turn on send BD completion state machine */
3018: CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3019:
1.236 msaitoh 3020: /* 57XX step 88 */
1.225 msaitoh 3021: /* Turn on RX BD initiator state machine */
3022: CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3023:
1.236 msaitoh 3024: /* 5718 step 60, 57XX step 90 */
1.158 msaitoh 3025: /* Turn on send data initiator state machine */
1.261 msaitoh 3026: if (sc->bge_flags & BGEF_TSO) {
1.158 msaitoh 3027: /* XXX: magic value from Linux driver */
1.222 msaitoh 3028: CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
3029: BGE_SDIMODE_HW_LSO_PRE_DMA);
1.177 msaitoh 3030: } else
1.158 msaitoh 3031: CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1.106 jonathan 3032:
1.236 msaitoh 3033: /* 5718 step 61, 57XX step 91 */
1.158 msaitoh 3034: /* Turn on send BD initiator state machine */
3035: CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1.133 markd 3036:
1.236 msaitoh 3037: /* 5718 step 62, 57XX step 92 */
1.158 msaitoh 3038: /* Turn on send BD selector state machine */
3039: CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1.135 taca 3040:
1.236 msaitoh 3041: /* 5718 step 31, 57XX step 60 */
1.158 msaitoh 3042: CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1.236 msaitoh 3043: /* 5718 step 32, 57XX step 61 */
1.158 msaitoh 3044: CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1.161 msaitoh 3045: BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1.133 markd 3046:
1.158 msaitoh 3047: /* ack/clear link change events */
1.161 msaitoh 3048: CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3049: BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1.172 msaitoh 3050: BGE_MACSTAT_LINK_CHANGED);
1.158 msaitoh 3051: CSR_WRITE_4(sc, BGE_MI_STS, 0);
1.106 jonathan 3052:
1.216 msaitoh 3053: /*
3054: * Enable attention when the link has changed state for
3055: * devices that use auto polling.
3056: */
1.261 msaitoh 3057: if (sc->bge_flags & BGEF_FIBER_TBI) {
1.158 msaitoh 3058: CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1.178 msaitoh 3059: } else {
1.272 msaitoh 3060: if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
3061: mimode = BGE_MIMODE_500KHZ_CONST;
3062: else
3063: mimode = BGE_MIMODE_BASE;
3064: /* 5718 step 68. 5718 step 69 (optionally). */
3065: if (BGE_IS_5700_FAMILY(sc) ||
3066: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
3067: mimode |= BGE_MIMODE_AUTOPOLL;
3068: BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
3069: }
3070: mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
3071: CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
1.158 msaitoh 3072: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
3073: CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3074: BGE_EVTENB_MI_INTERRUPT);
3075: }
1.70 tron 3076:
1.161 msaitoh 3077: /*
3078: * Clear any pending link state attention.
3079: * Otherwise some link state change events may be lost until attention
3080: * is cleared by bge_intr() -> bge_link_upd() sequence.
3081: * It's not necessary on newer BCM chips - perhaps enabling link
3082: * state change attentions implies clearing pending attention.
3083: */
3084: CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
3085: BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
3086: BGE_MACSTAT_LINK_CHANGED);
3087:
1.158 msaitoh 3088: /* Enable link state change attentions. */
3089: BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1.51 fvdl 3090:
1.170 msaitoh 3091: return 0;
1.158 msaitoh 3092: }
1.7 thorpej 3093:
1.158 msaitoh 3094: static const struct bge_revision *
3095: bge_lookup_rev(uint32_t chipid)
3096: {
3097: const struct bge_revision *br;
1.7 thorpej 3098:
1.158 msaitoh 3099: for (br = bge_revisions; br->br_name != NULL; br++) {
3100: if (br->br_chipid == chipid)
1.170 msaitoh 3101: return br;
1.158 msaitoh 3102: }
1.151 cegger 3103:
1.158 msaitoh 3104: for (br = bge_majorrevs; br->br_name != NULL; br++) {
3105: if (br->br_chipid == BGE_ASICREV(chipid))
1.170 msaitoh 3106: return br;
1.158 msaitoh 3107: }
1.151 cegger 3108:
1.170 msaitoh 3109: return NULL;
1.158 msaitoh 3110: }
1.7 thorpej 3111:
3112: static const struct bge_product *
3113: bge_lookup(const struct pci_attach_args *pa)
3114: {
3115: const struct bge_product *bp;
3116:
3117: for (bp = bge_products; bp->bp_name != NULL; bp++) {
3118: if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
3119: PCI_PRODUCT(pa->pa_id) == bp->bp_product)
1.170 msaitoh 3120: return bp;
1.7 thorpej 3121: }
3122:
1.170 msaitoh 3123: return NULL;
1.7 thorpej 3124: }
3125:
1.215 msaitoh 3126: static uint32_t
3127: bge_chipid(const struct pci_attach_args *pa)
3128: {
3129: uint32_t id;
3130:
3131: id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
3132: >> BGE_PCIMISCCTL_ASICREV_SHIFT;
3133:
3134: if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
3135: switch (PCI_PRODUCT(pa->pa_id)) {
3136: case PCI_PRODUCT_BROADCOM_BCM5717:
3137: case PCI_PRODUCT_BROADCOM_BCM5718:
1.216 msaitoh 3138: case PCI_PRODUCT_BROADCOM_BCM5719:
3139: case PCI_PRODUCT_BROADCOM_BCM5720:
1.327 msaitoh 3140: case PCI_PRODUCT_BROADCOM_BCM5725:
3141: case PCI_PRODUCT_BROADCOM_BCM5727:
3142: case PCI_PRODUCT_BROADCOM_BCM5762:
3143: case PCI_PRODUCT_BROADCOM_BCM57764:
3144: case PCI_PRODUCT_BROADCOM_BCM57767:
3145: case PCI_PRODUCT_BROADCOM_BCM57787:
1.215 msaitoh 3146: id = pci_conf_read(pa->pa_pc, pa->pa_tag,
3147: BGE_PCI_GEN2_PRODID_ASICREV);
3148: break;
3149: case PCI_PRODUCT_BROADCOM_BCM57761:
3150: case PCI_PRODUCT_BROADCOM_BCM57762:
3151: case PCI_PRODUCT_BROADCOM_BCM57765:
3152: case PCI_PRODUCT_BROADCOM_BCM57766:
3153: case PCI_PRODUCT_BROADCOM_BCM57781:
1.305 msaitoh 3154: case PCI_PRODUCT_BROADCOM_BCM57782:
1.215 msaitoh 3155: case PCI_PRODUCT_BROADCOM_BCM57785:
1.305 msaitoh 3156: case PCI_PRODUCT_BROADCOM_BCM57786:
1.215 msaitoh 3157: case PCI_PRODUCT_BROADCOM_BCM57791:
3158: case PCI_PRODUCT_BROADCOM_BCM57795:
3159: id = pci_conf_read(pa->pa_pc, pa->pa_tag,
3160: BGE_PCI_GEN15_PRODID_ASICREV);
3161: break;
3162: default:
3163: id = pci_conf_read(pa->pa_pc, pa->pa_tag,
3164: BGE_PCI_PRODID_ASICREV);
3165: break;
3166: }
3167: }
3168:
3169: return id;
3170: }
1.25 jonathan 3171:
1.1 fvdl 3172: /*
1.288 msaitoh 3173: * Return true if MSI can be used with this device.
3174: */
3175: static int
3176: bge_can_use_msi(struct bge_softc *sc)
3177: {
3178: int can_use_msi = 0;
3179:
3180: switch (BGE_ASICREV(sc->bge_chipid)) {
3181: case BGE_ASICREV_BCM5714_A0:
3182: case BGE_ASICREV_BCM5714:
3183: /*
3184: * Apparently, MSI doesn't work when these chips are
3185: * configured in single-port mode.
3186: */
3187: break;
3188: case BGE_ASICREV_BCM5750:
3189: if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
3190: BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
3191: can_use_msi = 1;
3192: break;
3193: default:
3194: if (BGE_IS_575X_PLUS(sc))
3195: can_use_msi = 1;
3196: }
1.362 skrll 3197: return can_use_msi;
1.288 msaitoh 3198: }
3199:
3200: /*
1.1 fvdl 3201: * Probe for a Broadcom chip. Check the PCI vendor and device IDs
3202: * against our list and return its name if we find a match. Note
3203: * that since the Broadcom controller contains VPD support, we
3204: * can get the device name string from the controller itself instead
3205: * of the compiled-in string. This is a little slow, but it guarantees
3206: * we'll always announce the right product name.
3207: */
1.104 thorpej 3208: static int
1.116 christos 3209: bge_probe(device_t parent, cfdata_t match, void *aux)
1.1 fvdl 3210: {
3211: struct pci_attach_args *pa = (struct pci_attach_args *)aux;
3212:
1.7 thorpej 3213: if (bge_lookup(pa) != NULL)
1.170 msaitoh 3214: return 1;
1.1 fvdl 3215:
1.170 msaitoh 3216: return 0;
1.1 fvdl 3217: }
3218:
1.104 thorpej 3219: static void
1.116 christos 3220: bge_attach(device_t parent, device_t self, void *aux)
1.1 fvdl 3221: {
1.354 skrll 3222: struct bge_softc * const sc = device_private(self);
3223: struct pci_attach_args * const pa = aux;
1.164 msaitoh 3224: prop_dictionary_t dict;
1.7 thorpej 3225: const struct bge_product *bp;
1.16 thorpej 3226: const struct bge_revision *br;
1.143 tron 3227: pci_chipset_tag_t pc;
1.1 fvdl 3228: const char *intrstr = NULL;
1.330 msaitoh 3229: uint32_t hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5;
1.170 msaitoh 3230: uint32_t command;
1.1 fvdl 3231: struct ifnet *ifp;
1.331 msaitoh 3232: struct mii_data * const mii = &sc->bge_mii;
1.342 msaitoh 3233: uint32_t misccfg, mimode, macmode;
1.126 christos 3234: void * kva;
1.1 fvdl 3235: u_char eaddr[ETHER_ADDR_LEN];
1.216 msaitoh 3236: pcireg_t memtype, subid, reg;
1.1 fvdl 3237: bus_addr_t memaddr;
1.170 msaitoh 3238: uint32_t pm_ctl;
1.174 martin 3239: bool no_seeprom;
1.342 msaitoh 3240: int capmask, trys;
1.269 msaitoh 3241: int mii_flags;
1.273 msaitoh 3242: int map_flags;
1.266 christos 3243: char intrbuf[PCI_INTRSTR_LEN];
1.87 perry 3244:
1.7 thorpej 3245: bp = bge_lookup(pa);
3246: KASSERT(bp != NULL);
3247:
1.141 jmcneill 3248: sc->sc_pc = pa->pa_pc;
3249: sc->sc_pcitag = pa->pa_tag;
1.138 joerg 3250: sc->bge_dev = self;
1.1 fvdl 3251:
1.216 msaitoh 3252: sc->bge_pa = *pa;
1.172 msaitoh 3253: pc = sc->sc_pc;
3254: subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
3255:
1.30 thorpej 3256: aprint_naive(": Ethernet controller\n");
1.325 msaitoh 3257: aprint_normal(": %s Ethernet\n", bp->bp_name);
1.1 fvdl 3258:
3259: /*
3260: * Map control/status registers.
3261: */
3262: DPRINTFN(5, ("Map control/status regs\n"));
1.141 jmcneill 3263: command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
1.1 fvdl 3264: command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
1.141 jmcneill 3265: pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command);
3266: command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
1.1 fvdl 3267:
3268: if (!(command & PCI_COMMAND_MEM_ENABLE)) {
1.138 joerg 3269: aprint_error_dev(sc->bge_dev,
3270: "failed to enable memory mapping!\n");
1.1 fvdl 3271: return;
3272: }
3273:
3274: DPRINTFN(5, ("pci_mem_find\n"));
1.141 jmcneill 3275: memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0);
1.178 msaitoh 3276: switch (memtype) {
1.29 itojun 3277: case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3278: case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1.275 msaitoh 3279: #if 0
1.1 fvdl 3280: if (pci_mapreg_map(pa, BGE_PCI_BAR0,
1.29 itojun 3281: memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
1.227 msaitoh 3282: &memaddr, &sc->bge_bsize) == 0)
1.1 fvdl 3283: break;
1.275 msaitoh 3284: #else
3285: /*
3286: * Workaround for PCI prefetchable bit. Some BCM5717-5720 based
3287: * system get NMI on boot (PR#48451). This problem might not be
3288: * the driver's bug but our PCI common part's bug. Until we
3289: * find a real reason, we ignore the prefetchable bit.
3290: */
3291: if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0,
3292: memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) {
3293: map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3294: if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize,
3295: map_flags, &sc->bge_bhandle) == 0) {
3296: sc->bge_btag = pa->pa_memt;
3297: break;
3298: }
3299: }
3300: #endif
1.323 mrg 3301: /* FALLTHROUGH */
1.1 fvdl 3302: default:
1.138 joerg 3303: aprint_error_dev(sc->bge_dev, "can't find mem space\n");
1.1 fvdl 3304: return;
3305: }
3306:
1.215 msaitoh 3307: /* Save various chip information. */
3308: sc->bge_chipid = bge_chipid(pa);
1.216 msaitoh 3309: sc->bge_phy_addr = bge_phy_addr(sc);
1.76 cube 3310:
1.303 msaitoh 3311: if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
3312: &sc->bge_pciecap, NULL) != 0) {
1.171 msaitoh 3313: /* PCIe */
1.261 msaitoh 3314: sc->bge_flags |= BGEF_PCIE;
1.253 msaitoh 3315: /* Extract supported maximum payload size. */
3316: reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3317: sc->bge_pciecap + PCIE_DCAP);
3318: sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD);
1.216 msaitoh 3319: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
3320: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
3321: sc->bge_expmrq = 2048;
3322: else
3323: sc->bge_expmrq = 4096;
1.177 msaitoh 3324: bge_set_max_readrq(sc);
1.303 msaitoh 3325: } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) {
3326: /* PCIe without PCIe cap */
3327: sc->bge_flags |= BGEF_PCIE;
1.171 msaitoh 3328: } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
3329: BGE_PCISTATE_PCI_BUSMODE) == 0) {
3330: /* PCI-X */
1.261 msaitoh 3331: sc->bge_flags |= BGEF_PCIX;
1.180 msaitoh 3332: if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
3333: &sc->bge_pcixcap, NULL) == 0)
3334: aprint_error_dev(sc->bge_dev,
3335: "unable to find PCIX capability\n");
1.171 msaitoh 3336: }
1.76 cube 3337:
1.216 msaitoh 3338: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) {
3339: /*
3340: * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
3341: * can clobber the chip's PCI config-space power control
3342: * registers, leaving the card in D3 powersave state. We do
3343: * not have memory-mapped registers in this state, so force
3344: * device into D0 state before starting initialization.
3345: */
3346: pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
1.331 msaitoh 3347: pm_ctl &= ~(PCI_PWR_D0 | PCI_PWR_D1 | PCI_PWR_D2 | PCI_PWR_D3);
1.216 msaitoh 3348: pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
3349: pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
1.348 andvar 3350: DELAY(1000); /* 27 usec is allegedly sufficient */
1.216 msaitoh 3351: }
3352:
1.215 msaitoh 3353: /* Save chipset family. */
3354: switch (BGE_ASICREV(sc->bge_chipid)) {
3355: case BGE_ASICREV_BCM5717:
1.216 msaitoh 3356: case BGE_ASICREV_BCM5719:
3357: case BGE_ASICREV_BCM5720:
1.261 msaitoh 3358: sc->bge_flags |= BGEF_5717_PLUS;
1.257 msaitoh 3359: /* FALLTHROUGH */
1.327 msaitoh 3360: case BGE_ASICREV_BCM5762:
1.257 msaitoh 3361: case BGE_ASICREV_BCM57765:
3362: case BGE_ASICREV_BCM57766:
3363: if (!BGE_IS_5717_PLUS(sc))
1.261 msaitoh 3364: sc->bge_flags |= BGEF_57765_FAMILY;
3365: sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS |
3366: BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE;
1.327 msaitoh 3367: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
3368: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
3369: /*
3370: * Enable work around for DMA engine miscalculation
3371: * of TXMBUF available space.
3372: */
3373: sc->bge_flags |= BGEF_RDMA_BUG;
3374:
3375: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) &&
3376: (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) {
3377: /* Jumbo frame on BCM5719 A0 does not work. */
3378: sc->bge_flags &= ~BGEF_JUMBO_CAPABLE;
3379: }
3380: }
1.215 msaitoh 3381: break;
3382: case BGE_ASICREV_BCM5755:
3383: case BGE_ASICREV_BCM5761:
3384: case BGE_ASICREV_BCM5784:
3385: case BGE_ASICREV_BCM5785:
3386: case BGE_ASICREV_BCM5787:
3387: case BGE_ASICREV_BCM57780:
1.261 msaitoh 3388: sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS;
1.215 msaitoh 3389: break;
3390: case BGE_ASICREV_BCM5700:
3391: case BGE_ASICREV_BCM5701:
3392: case BGE_ASICREV_BCM5703:
3393: case BGE_ASICREV_BCM5704:
1.261 msaitoh 3394: sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE;
1.215 msaitoh 3395: break;
3396: case BGE_ASICREV_BCM5714_A0:
3397: case BGE_ASICREV_BCM5780:
3398: case BGE_ASICREV_BCM5714:
1.261 msaitoh 3399: sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE;
1.215 msaitoh 3400: /* FALLTHROUGH */
3401: case BGE_ASICREV_BCM5750:
3402: case BGE_ASICREV_BCM5752:
3403: case BGE_ASICREV_BCM5906:
1.261 msaitoh 3404: sc->bge_flags |= BGEF_575X_PLUS;
1.215 msaitoh 3405: /* FALLTHROUGH */
3406: case BGE_ASICREV_BCM5705:
1.261 msaitoh 3407: sc->bge_flags |= BGEF_5705_PLUS;
1.215 msaitoh 3408: break;
3409: }
1.172 msaitoh 3410:
1.216 msaitoh 3411: /* Identify chips with APE processor. */
3412: switch (BGE_ASICREV(sc->bge_chipid)) {
3413: case BGE_ASICREV_BCM5717:
3414: case BGE_ASICREV_BCM5719:
3415: case BGE_ASICREV_BCM5720:
3416: case BGE_ASICREV_BCM5761:
1.327 msaitoh 3417: case BGE_ASICREV_BCM5762:
1.261 msaitoh 3418: sc->bge_flags |= BGEF_APE;
1.216 msaitoh 3419: break;
3420: }
3421:
1.262 msaitoh 3422: /*
3423: * The 40bit DMA bug applies to the 5714/5715 controllers and is
3424: * not actually a MAC controller bug but an issue with the embedded
3425: * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3426: */
3427: if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0))
3428: sc->bge_flags |= BGEF_40BIT_BUG;
3429:
1.216 msaitoh 3430: /* Chips with APE need BAR2 access for APE registers/memory. */
1.261 msaitoh 3431: if ((sc->bge_flags & BGEF_APE) != 0) {
1.216 msaitoh 3432: memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
1.273 msaitoh 3433: #if 0
1.216 msaitoh 3434: if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
1.227 msaitoh 3435: &sc->bge_apetag, &sc->bge_apehandle, NULL,
3436: &sc->bge_apesize)) {
1.216 msaitoh 3437: aprint_error_dev(sc->bge_dev,
3438: "couldn't map BAR2 memory\n");
3439: return;
3440: }
1.273 msaitoh 3441: #else
3442: /*
3443: * Workaround for PCI prefetchable bit. Some BCM5717-5720 based
3444: * system get NMI on boot (PR#48451). This problem might not be
3445: * the driver's bug but our PCI common part's bug. Until we
3446: * find a real reason, we ignore the prefetchable bit.
3447: */
3448: if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2,
3449: memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) {
3450: aprint_error_dev(sc->bge_dev,
3451: "couldn't map BAR2 memory\n");
3452: return;
3453: }
3454:
3455: map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3456: if (bus_space_map(pa->pa_memt, memaddr,
3457: sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) {
3458: aprint_error_dev(sc->bge_dev,
3459: "couldn't map BAR2 memory\n");
3460: return;
3461: }
3462: sc->bge_apetag = pa->pa_memt;
3463: #endif
1.216 msaitoh 3464:
3465: /* Enable APE register/memory access by host driver. */
3466: reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
3467: reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3468: BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3469: BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3470: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
3471:
3472: bge_ape_lock_init(sc);
3473: bge_ape_read_fw_ver(sc);
3474: }
3475:
3476: 	/* Identify the chips that use a CPMU. */
3477: if (BGE_IS_5717_PLUS(sc) ||
3478: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
3479: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
3480: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
3481: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
1.261 msaitoh 3482: sc->bge_flags |= BGEF_CPMU_PRESENT;
1.216 msaitoh 3483:
1.172 msaitoh 3484: /*
3485: * When using the BCM5701 in PCI-X mode, data corruption has
3486: * been observed in the first few bytes of some received packets.
3487: * Aligning the packet buffer in memory eliminates the corruption.
3488: * Unfortunately, this misaligns the packet payloads. On platforms
3489: * which do not support unaligned accesses, we will realign the
3490: * payloads by copying the received packets.
3491: */
3492: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1.261 msaitoh 3493: sc->bge_flags & BGEF_PCIX)
3494: sc->bge_flags |= BGEF_RX_ALIGNBUG;
1.172 msaitoh 3495:
3496: if (BGE_IS_5700_FAMILY(sc))
1.261 msaitoh 3497: sc->bge_flags |= BGEF_JUMBO_CAPABLE;
1.172 msaitoh 3498:
3499: misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
3500: misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
3501:
3502: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3503: (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3504: misccfg == BGE_MISCCFG_BOARD_ID_5788M))
1.261 msaitoh 3505: sc->bge_flags |= BGEF_IS_5788;
1.172 msaitoh 3506:
3507: /*
3508: * Some controllers seem to require a special firmware to use
3509: * TSO. But the firmware is not available to FreeBSD and Linux
3510: * claims that the TSO performed by the firmware is slower than
3511: * hardware based TSO. Moreover the firmware based TSO has one
3512: * known bug which can't handle TSO if ethernet header + IP/TCP
3513: 	 * header is greater than 80 bytes. The workaround for the TSO
3514: 	 * bug exists but it seems more expensive than not using
3515: 	 * TSO at all. Some hardware also has the TSO bug, so limit
3516: 	 * TSO to the controllers that are not affected by TSO issues
3517: * (e.g. 5755 or higher).
3518: */
3519: if (BGE_IS_5755_PLUS(sc)) {
3520: /*
3521: * BCM5754 and BCM5787 shares the same ASIC id so
3522: * explicit device id check is required.
3523: */
3524: if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) &&
3525: (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M))
1.261 msaitoh 3526: sc->bge_flags |= BGEF_TSO;
1.316 bouyer 3527: /* TSO on BCM5719 A0 does not work. */
3528: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) &&
3529: (sc->bge_chipid == BGE_CHIPID_BCM5719_A0))
3530: sc->bge_flags &= ~BGEF_TSO;
1.172 msaitoh 3531: }
3532:
1.220 msaitoh 3533: capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */
1.172 msaitoh 3534: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
3535: (misccfg == 0x4000 || misccfg == 0x8000)) ||
3536: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3537: PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
3538: (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
3539: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
3540: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
3541: (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
3542: (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
3543: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
3544: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
3545: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
1.216 msaitoh 3546: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
3547: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
1.220 msaitoh 3548: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1.270 msaitoh 3549: /* These chips are 10/100 only. */
1.220 msaitoh 3550: capmask &= ~BMSR_EXTSTAT;
1.261 msaitoh 3551: sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
1.220 msaitoh 3552: }
1.172 msaitoh 3553:
3554: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3555: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3556: (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1.220 msaitoh 3557: sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
1.261 msaitoh 3558: sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
1.172 msaitoh 3559:
1.220 msaitoh 3560: /* Set various PHY bug flags. */
1.162 msaitoh 3561: if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
3562: sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1.261 msaitoh 3563: sc->bge_phy_flags |= BGEPHYF_CRC_BUG;
1.162 msaitoh 3564: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
3565: BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
1.261 msaitoh 3566: sc->bge_phy_flags |= BGEPHYF_ADC_BUG;
1.162 msaitoh 3567: if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1.261 msaitoh 3568: sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG;
1.220 msaitoh 3569: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3570: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
3571: PCI_VENDOR(subid) == PCI_VENDOR_DELL)
1.261 msaitoh 3572: sc->bge_phy_flags |= BGEPHYF_NO_3LED;
1.172 msaitoh 3573: if (BGE_IS_5705_PLUS(sc) &&
3574: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
3575: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
1.216 msaitoh 3576: BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
1.257 msaitoh 3577: !BGE_IS_57765_PLUS(sc)) {
1.162 msaitoh 3578: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1.172 msaitoh 3579: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
3580: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
1.162 msaitoh 3581: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
3582: if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
3583: PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
1.261 msaitoh 3584: sc->bge_phy_flags |= BGEPHYF_JITTER_BUG;
1.162 msaitoh 3585: if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
1.261 msaitoh 3586: sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM;
1.216 msaitoh 3587: } else
1.261 msaitoh 3588: sc->bge_phy_flags |= BGEPHYF_BER_BUG;
1.162 msaitoh 3589: }
3590:
1.174 martin 3591: /*
3592: * SEEPROM check.
3593: * First check if firmware knows we do not have SEEPROM.
3594: */
1.180 msaitoh 3595: if (prop_dictionary_get_bool(device_properties(self),
1.367 skrll 3596: "without-seeprom", &no_seeprom) && no_seeprom)
1.330 msaitoh 3597: sc->bge_flags |= BGEF_NO_EEPROM;
1.174 martin 3598:
1.228 msaitoh 3599: else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1.261 msaitoh 3600: sc->bge_flags |= BGEF_NO_EEPROM;
1.228 msaitoh 3601:
1.174 martin 3602: /* Now check the 'ROM failed' bit on the RX CPU */
3603: else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL)
1.261 msaitoh 3604: sc->bge_flags |= BGEF_NO_EEPROM;
1.172 msaitoh 3605:
1.177 msaitoh 3606: sc->bge_asf_mode = 0;
1.216 msaitoh 3607: /* No ASF if APE present. */
1.261 msaitoh 3608: if ((sc->bge_flags & BGEF_APE) == 0) {
1.216 msaitoh 3609: if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3610: BGE_SRAM_DATA_SIG_MAGIC)) {
3611: if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
3612: BGE_HWCFG_ASF) {
3613: sc->bge_asf_mode |= ASF_ENABLE;
3614: sc->bge_asf_mode |= ASF_STACKUP;
3615: if (BGE_IS_575X_PLUS(sc))
3616: sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
1.177 msaitoh 3617: }
3618: }
3619: }
3620:
1.318 jdolecek 3621: int counts[PCI_INTR_TYPE_SIZE] = {
3622: [PCI_INTR_TYPE_INTX] = 1,
3623: [PCI_INTR_TYPE_MSI] = 1,
1.319 jdolecek 3624: [PCI_INTR_TYPE_MSIX] = 1,
1.318 jdolecek 3625: };
3626: int max_type = PCI_INTR_TYPE_MSIX;
3627:
3628: if (!bge_can_use_msi(sc)) {
3629: /* MSI broken, allow only INTx */
1.293 knakahar 3630: max_type = PCI_INTR_TYPE_INTX;
1.318 jdolecek 3631: }
1.293 knakahar 3632:
3633: if (pci_intr_alloc(pa, &sc->bge_pihp, counts, max_type) != 0) {
3634: aprint_error_dev(sc->bge_dev, "couldn't alloc interrupt\n");
3635: return;
1.288 msaitoh 3636: }
3637:
1.293 knakahar 3638: DPRINTFN(5, ("pci_intr_string\n"));
1.288 msaitoh 3639: intrstr = pci_intr_string(pc, sc->bge_pihp[0], intrbuf,
3640: sizeof(intrbuf));
3641: DPRINTFN(5, ("pci_intr_establish\n"));
1.310 msaitoh 3642: sc->bge_intrhand = pci_intr_establish_xname(pc, sc->bge_pihp[0],
3643: IPL_NET, bge_intr, sc, device_xname(sc->bge_dev));
1.293 knakahar 3644: if (sc->bge_intrhand == NULL) {
3645: pci_intr_release(pc, sc->bge_pihp, 1);
1.318 jdolecek 3646: sc->bge_pihp = NULL;
1.288 msaitoh 3647:
1.318 jdolecek 3648: aprint_error_dev(self, "couldn't establish interrupt");
3649: if (intrstr != NULL)
3650: aprint_error(" at %s", intrstr);
3651: aprint_error("\n");
1.288 msaitoh 3652: return;
3653: }
3654: aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
3655:
1.318 jdolecek 3656: switch (pci_intr_type(pc, sc->bge_pihp[0])) {
3657: case PCI_INTR_TYPE_MSIX:
3658: case PCI_INTR_TYPE_MSI:
3659: KASSERT(bge_can_use_msi(sc));
3660: sc->bge_flags |= BGEF_MSI;
3661: break;
3662: default:
3663: /* nothing to do */
3664: break;
3665: }
3666:
1.375 skrll 3667: char wqname[MAXCOMLEN];
3668: snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->bge_dev));
3669: int error = workqueue_create(&sc->sc_reset_wq, wqname,
3670: bge_handle_reset_work, sc, PRI_NONE, IPL_SOFTCLOCK,
3671: WQ_MPSAFE);
3672: if (error) {
3673: aprint_error_dev(sc->bge_dev,
3674: "unable to create reset workqueue\n");
3675: return;
3676: }
3677:
3678:
1.288 msaitoh 3679: /*
3680: * All controllers except BCM5700 supports tagged status but
3681: * we use tagged status only for MSI case on BCM5717. Otherwise
3682: * MSI on BCM5717 does not work.
3683: */
1.307 msaitoh 3684: if (BGE_IS_57765_PLUS(sc) && sc->bge_flags & BGEF_MSI)
1.288 msaitoh 3685: sc->bge_flags |= BGEF_TAGGED_STATUS;
3686:
1.248 msaitoh 3687: /*
3688: * Reset NVRAM before bge_reset(). It's required to acquire NVRAM
3689: * lock in bge_reset().
3690: */
1.341 msaitoh 3691: CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR,
1.248 msaitoh 3692: BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
3693: delay(1000);
1.341 msaitoh 3694: BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
1.248 msaitoh 3695:
3696: bge_stop_fw(sc);
1.353 buhrow 3697: bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
1.248 msaitoh 3698: if (bge_reset(sc))
3699: aprint_error_dev(sc->bge_dev, "chip reset failed\n");
1.243 msaitoh 3700:
1.241 msaitoh 3701: /*
3702: * Read the hardware config word in the first 32k of NIC internal
3703: * memory, or fall back to the config word in the EEPROM.
3704: * Note: on some BCM5700 cards, this value appears to be unset.
3705: */
1.267 msaitoh 3706: hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0;
1.248 msaitoh 3707: if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
1.241 msaitoh 3708: BGE_SRAM_DATA_SIG_MAGIC) {
3709: uint32_t tmp;
3710:
3711: hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3712: tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >>
3713: BGE_SRAM_DATA_VER_SHIFT;
3714: if ((0 < tmp) && (tmp < 0x100))
3715: hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2);
1.261 msaitoh 3716: if (sc->bge_flags & BGEF_PCIE)
1.241 msaitoh 3717: hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3);
1.278 msaitoh 3718: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
1.241 msaitoh 3719: hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4);
1.267 msaitoh 3720: if (BGE_IS_5717_PLUS(sc))
1.268 msaitoh 3721: hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5);
1.261 msaitoh 3722: } else if (!(sc->bge_flags & BGEF_NO_EEPROM)) {
1.241 msaitoh 3723: bge_read_eeprom(sc, (void *)&hwcfg,
3724: BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
3725: hwcfg = be32toh(hwcfg);
3726: }
1.267 msaitoh 3727: aprint_normal_dev(sc->bge_dev,
3728: "HW config %08x, %08x, %08x, %08x %08x\n",
3729: hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5);
1.241 msaitoh 3730:
1.353 buhrow 3731: bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
3732: bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
1.177 msaitoh 3733:
1.1 fvdl 3734: if (bge_chipinit(sc)) {
1.138 joerg 3735: aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
1.1 fvdl 3736: bge_release_resources(sc);
3737: return;
3738: }
3739:
1.342 msaitoh 3740: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
3741: BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL,
3742: BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUTEN1);
3743: DELAY(100);
3744: }
3745:
3746: /* Set MI_MODE */
3747: mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
3748: if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
3749: mimode |= BGE_MIMODE_500KHZ_CONST;
3750: else
3751: mimode |= BGE_MIMODE_BASE;
3752: CSR_WRITE_4_FLUSH(sc, BGE_MI_MODE, mimode);
3753: DELAY(80);
3754:
1.1 fvdl 3755: /*
1.203 msaitoh 3756: * Get station address from the EEPROM.
1.1 fvdl 3757: */
1.151 cegger 3758: if (bge_get_eaddr(sc, eaddr)) {
1.178 msaitoh 3759: aprint_error_dev(sc->bge_dev,
3760: "failed to read station address\n");
1.1 fvdl 3761: bge_release_resources(sc);
3762: return;
3763: }
3764:
1.51 fvdl 3765: br = bge_lookup_rev(sc->bge_chipid);
3766:
1.16 thorpej 3767: if (br == NULL) {
1.172 msaitoh 3768: aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)",
3769: sc->bge_chipid);
1.16 thorpej 3770: } else {
1.172 msaitoh 3771: aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)",
3772: br->br_name, sc->bge_chipid);
1.16 thorpej 3773: }
1.30 thorpej 3774: aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
1.1 fvdl 3775:
3776: /* Allocate the general information block and ring buffers. */
1.317 bouyer 3777: if (pci_dma64_available(pa)) {
1.41 fvdl 3778: sc->bge_dmatag = pa->pa_dmat64;
1.317 bouyer 3779: sc->bge_dmatag32 = pa->pa_dmat;
3780: sc->bge_dma64 = true;
3781: } else {
1.41 fvdl 3782: sc->bge_dmatag = pa->pa_dmat;
1.317 bouyer 3783: sc->bge_dmatag32 = pa->pa_dmat;
3784: sc->bge_dma64 = false;
3785: }
1.262 msaitoh 3786:
3787: /* 40bit DMA workaround */
3788: if (sizeof(bus_addr_t) > 4) {
3789: if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) {
3790: bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */
3791:
1.351 martin 3792: if (bus_dmatag_subregion(olddmatag, 0,
3793: (bus_addr_t)__MASK(40),
1.377 skrll 3794: &(sc->bge_dmatag), BUS_DMA_WAITOK) != 0) {
1.262 msaitoh 3795: aprint_error_dev(self,
3796: "WARNING: failed to restrict dma range,"
3797: " falling back to parent bus dma range\n");
3798: sc->bge_dmatag = olddmatag;
3799: }
3800: }
3801: }
1.320 bouyer 3802: SLIST_INIT(&sc->txdma_list);
1.1 fvdl 3803: DPRINTFN(5, ("bus_dmamem_alloc\n"));
3804: if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
1.227 msaitoh 3805: PAGE_SIZE, 0, &sc->bge_ring_seg, 1,
1.377 skrll 3806: &sc->bge_ring_rseg, BUS_DMA_WAITOK)) {
1.138 joerg 3807: aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
1.1 fvdl 3808: return;
3809: }
3810: DPRINTFN(5, ("bus_dmamem_map\n"));
1.227 msaitoh 3811: if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
3812: sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva,
1.377 skrll 3813: BUS_DMA_WAITOK)) {
1.138 joerg 3814: aprint_error_dev(sc->bge_dev,
3815: "can't map DMA buffers (%zu bytes)\n",
3816: sizeof(struct bge_ring_data));
1.227 msaitoh 3817: bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
3818: sc->bge_ring_rseg);
1.1 fvdl 3819: return;
3820: }
3821: DPRINTFN(5, ("bus_dmamem_create\n"));
3822: if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
3823: sizeof(struct bge_ring_data), 0,
1.377 skrll 3824: BUS_DMA_WAITOK, &sc->bge_ring_map)) {
1.138 joerg 3825: aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
1.1 fvdl 3826: bus_dmamem_unmap(sc->bge_dmatag, kva,
3827: sizeof(struct bge_ring_data));
1.227 msaitoh 3828: bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
3829: sc->bge_ring_rseg);
1.1 fvdl 3830: return;
3831: }
3832: DPRINTFN(5, ("bus_dmamem_load\n"));
3833: if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
3834: sizeof(struct bge_ring_data), NULL,
1.377 skrll 3835: BUS_DMA_WAITOK)) {
1.1 fvdl 3836: bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3837: bus_dmamem_unmap(sc->bge_dmatag, kva,
3838: sizeof(struct bge_ring_data));
1.227 msaitoh 3839: bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
3840: sc->bge_ring_rseg);
1.1 fvdl 3841: return;
3842: }
3843:
3844: DPRINTFN(5, ("bzero\n"));
3845: sc->bge_rdata = (struct bge_ring_data *)kva;
3846:
1.19 mjl 3847: memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
1.1 fvdl 3848:
3849: /* Try to allocate memory for jumbo buffers. */
1.166 msaitoh 3850: if (BGE_IS_JUMBO_CAPABLE(sc)) {
1.44 hannken 3851: if (bge_alloc_jumbo_mem(sc)) {
1.138 joerg 3852: aprint_error_dev(sc->bge_dev,
3853: "jumbo buffer allocation failed\n");
1.44 hannken 3854: } else
3855: sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3856: }
1.1 fvdl 3857:
3858: /* Set default tuneable values. */
3859: sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3860: sc->bge_rx_coal_ticks = 150;
1.25 jonathan 3861: sc->bge_rx_max_coal_bds = 64;
3862: sc->bge_tx_coal_ticks = 300;
3863: sc->bge_tx_max_coal_bds = 400;
1.172 msaitoh 3864: if (BGE_IS_5705_PLUS(sc)) {
1.95 jonathan 3865: sc->bge_tx_coal_ticks = (12 * 5);
1.146 mlelstv 3866: sc->bge_tx_max_coal_bds = (12 * 5);
1.138 joerg 3867: aprint_verbose_dev(sc->bge_dev,
3868: "setting short Tx thresholds\n");
1.95 jonathan 3869: }
1.1 fvdl 3870:
1.216 msaitoh 3871: if (BGE_IS_5717_PLUS(sc))
1.202 tsutsui 3872: sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3873: else if (BGE_IS_5705_PLUS(sc))
1.172 msaitoh 3874: sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3875: else
3876: sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3877:
1.375 skrll 3878: sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3879:
1.1 fvdl 3880: /* Set up ifnet structure */
3881: ifp = &sc->ethercom.ec_if;
3882: ifp->if_softc = sc;
3883: ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1.375 skrll 3884: ifp->if_extflags = IFEF_MPSAFE;
1.1 fvdl 3885: ifp->if_ioctl = bge_ioctl;
1.141 jmcneill 3886: ifp->if_stop = bge_stop;
1.1 fvdl 3887: ifp->if_start = bge_start;
3888: ifp->if_init = bge_init;
1.315 riastrad 3889: IFQ_SET_MAXLEN(&ifp->if_snd, uimax(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
1.1 fvdl 3890: IFQ_SET_READY(&ifp->if_snd);
1.115 tsutsui 3891: DPRINTFN(5, ("strcpy if_xname\n"));
1.138 joerg 3892: strcpy(ifp->if_xname, device_xname(sc->bge_dev));
1.1 fvdl 3893:
1.157 msaitoh 3894: if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
1.18 thorpej 3895: sc->ethercom.ec_if.if_capabilities |=
1.172 msaitoh 3896: IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
3897: #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */
3898: sc->ethercom.ec_if.if_capabilities |=
1.88 yamt 3899: IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3900: IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
1.172 msaitoh 3901: #endif
1.87 perry 3902: sc->ethercom.ec_capabilities |=
1.1 fvdl 3903: ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
1.335 msaitoh 3904: sc->ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
1.1 fvdl 3905:
1.261 msaitoh 3906: if (sc->bge_flags & BGEF_TSO)
1.95 jonathan 3907: sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
3908:
1.1 fvdl 3909: /*
3910: * Do MII setup.
3911: */
3912: DPRINTFN(5, ("mii setup\n"));
1.331 msaitoh 3913: mii->mii_ifp = ifp;
3914: mii->mii_readreg = bge_miibus_readreg;
3915: mii->mii_writereg = bge_miibus_writereg;
3916: mii->mii_statchg = bge_miibus_statchg;
1.1 fvdl 3917:
3918: /*
1.203 msaitoh 3919: * Figure out what sort of media we have by checking the hardware
1.241 msaitoh 3920: * config word. Note: on some BCM5700 cards, this value appears to be
3921: * unset. If that's the case, we have to rely on identifying the NIC
3922: * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41.
3923: * The SysKonnect SK-9D41 is a 1000baseSX card.
1.1 fvdl 3924: */
1.340 msaitoh 3925: if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
1.161 msaitoh 3926: (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
1.270 msaitoh 3927: if (BGE_IS_5705_PLUS(sc)) {
3928: sc->bge_flags |= BGEF_FIBER_MII;
3929: sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
3930: } else
3931: sc->bge_flags |= BGEF_FIBER_TBI;
1.161 msaitoh 3932: }
1.1 fvdl 3933:
1.261 msaitoh 3934: /* Set bge_phy_flags before prop_dictionary_set_uint32() */
3935: if (BGE_IS_JUMBO_CAPABLE(sc))
3936: sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE;
3937:
1.195 jym 3938: /* set phyflags and chipid before mii_attach() */
1.167 msaitoh 3939: dict = device_properties(self);
1.261 msaitoh 3940: prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags);
1.195 jym 3941: prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid);
1.167 msaitoh 3942:
1.342 msaitoh 3943: macmode = CSR_READ_4(sc, BGE_MAC_MODE);
3944: macmode &= ~BGE_MACMODE_PORTMODE;
1.334 msaitoh 3945: /* Initialize ifmedia structures. */
1.261 msaitoh 3946: if (sc->bge_flags & BGEF_FIBER_TBI) {
1.342 msaitoh 3947: CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE,
3948: macmode | BGE_PORTMODE_TBI);
3949: DELAY(40);
3950:
1.334 msaitoh 3951: sc->ethercom.ec_ifmedia = &sc->bge_ifmedia;
1.1 fvdl 3952: ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3953: bge_ifmedia_sts);
1.177 msaitoh 3954: ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL);
1.331 msaitoh 3955: ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX |IFM_FDX,
1.1 fvdl 3956: 0, NULL);
1.177 msaitoh 3957: ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3958: ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
1.155 he 3959: /* Pretend the user requested this setting */
1.162 msaitoh 3960: sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1.1 fvdl 3961: } else {
1.342 msaitoh 3962: uint16_t phyreg;
3963: int rv;
1.1 fvdl 3964: /*
1.177 msaitoh 3965: * Do transceiver setup and tell the firmware the
3966: * driver is down so we can try to get access the
3967: * probe if ASF is running. Retry a couple of times
3968: * if we get a conflict with the ASF firmware accessing
3969: * the PHY.
1.1 fvdl 3970: */
1.342 msaitoh 3971: if (sc->bge_flags & BGEF_FIBER_MII)
3972: macmode |= BGE_PORTMODE_GMII;
3973: else
3974: macmode |= BGE_PORTMODE_MII;
3975: CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, macmode);
3976: DELAY(40);
3977:
3978: /*
3979: * Do transceiver setup and tell the firmware the
3980: * driver is down so we can try to get access the
3981: * probe if ASF is running. Retry a couple of times
3982: * if we get a conflict with the ASF firmware accessing
3983: * the PHY.
3984: */
3985: trys = 0;
1.177 msaitoh 3986: BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1.334 msaitoh 3987: sc->ethercom.ec_mii = mii;
1.331 msaitoh 3988: ifmedia_init(&mii->mii_media, 0, bge_ifmedia_upd,
1.1 fvdl 3989: bge_ifmedia_sts);
1.269 msaitoh 3990: mii_flags = MIIF_DOPAUSE;
3991: if (sc->bge_flags & BGEF_FIBER_MII)
3992: mii_flags |= MIIF_HAVEFIBER;
1.342 msaitoh 3993: again:
3994: bge_asf_driver_up(sc);
3995: rv = bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
3996: MII_BMCR, &phyreg);
3997: if ((rv != 0) || ((phyreg & BMCR_PDOWN) != 0)) {
3998: int i;
3999:
4000: bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
4001: MII_BMCR, BMCR_RESET);
4002: /* Wait up to 500ms for it to complete. */
4003: for (i = 0; i < 500; i++) {
4004: bge_miibus_readreg(sc->bge_dev,
4005: sc->bge_phy_addr, MII_BMCR, &phyreg);
4006: if ((phyreg & BMCR_RESET) == 0)
4007: break;
4008: DELAY(1000);
4009: }
4010: }
4011:
1.331 msaitoh 4012: mii_attach(sc->bge_dev, mii, capmask, sc->bge_phy_addr,
1.269 msaitoh 4013: MII_OFFSET_ANY, mii_flags);
1.87 perry 4014:
1.342 msaitoh 4015: if (LIST_EMPTY(&mii->mii_phys) && (trys++ < 4))
4016: goto again;
4017:
1.331 msaitoh 4018: if (LIST_EMPTY(&mii->mii_phys)) {
1.138 joerg 4019: aprint_error_dev(sc->bge_dev, "no PHY found!\n");
1.331 msaitoh 4020: ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
4021: 0, NULL);
4022: ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
1.1 fvdl 4023: } else
1.331 msaitoh 4024: ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
1.177 msaitoh 4025:
4026: /*
4027: * Now tell the firmware we are going up after probing the PHY
4028: */
4029: if (sc->bge_asf_mode & ASF_STACKUP)
4030: BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1.1 fvdl 4031: }
4032:
4033: /*
4034: * Call MI attach routine.
4035: */
1.375 skrll 4036: DPRINTFN(5, ("if_initialize\n"));
4037: if_initialize(ifp);
4038: ifp->if_percpuq = if_percpuq_create(ifp);
1.299 ozaki-r 4039: if_deferred_start_init(ifp, NULL);
1.375 skrll 4040: if_register(ifp);
4041:
1.1 fvdl 4042: DPRINTFN(5, ("ether_ifattach\n"));
4043: ether_ifattach(ifp, eaddr);
1.186 msaitoh 4044: ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
1.375 skrll 4045:
1.148 mlelstv 4046: rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
1.277 tls 4047: RND_TYPE_NET, RND_FLAG_DEFAULT);
1.72 thorpej 4048: #ifdef BGE_EVENT_COUNTERS
4049: /*
4050: * Attach event counters.
4051: */
4052: evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
1.138 joerg 4053: NULL, device_xname(sc->bge_dev), "intr");
1.302 msaitoh 4054: evcnt_attach_dynamic(&sc->bge_ev_intr_spurious, EVCNT_TYPE_INTR,
4055: NULL, device_xname(sc->bge_dev), "intr_spurious");
4056: evcnt_attach_dynamic(&sc->bge_ev_intr_spurious2, EVCNT_TYPE_INTR,
4057: NULL, device_xname(sc->bge_dev), "intr_spurious2");
1.72 thorpej 4058: evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
1.138 joerg 4059: NULL, device_xname(sc->bge_dev), "tx_xoff");
1.72 thorpej 4060: evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
1.138 joerg 4061: NULL, device_xname(sc->bge_dev), "tx_xon");
1.72 thorpej 4062: evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
1.138 joerg 4063: NULL, device_xname(sc->bge_dev), "rx_xoff");
1.72 thorpej 4064: evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
1.138 joerg 4065: NULL, device_xname(sc->bge_dev), "rx_xon");
1.72 thorpej 4066: evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
1.138 joerg 4067: NULL, device_xname(sc->bge_dev), "rx_macctl");
1.72 thorpej 4068: evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
1.138 joerg 4069: NULL, device_xname(sc->bge_dev), "xoffentered");
1.72 thorpej 4070: #endif /* BGE_EVENT_COUNTERS */
1.1 fvdl 4071: DPRINTFN(5, ("callout_init\n"));
1.375 skrll 4072: callout_init(&sc->bge_timeout, CALLOUT_MPSAFE);
1.345 thorpej 4073: callout_setfunc(&sc->bge_timeout, bge_tick, sc);
1.82 jmcneill 4074:
1.168 tsutsui 4075: if (pmf_device_register(self, NULL, NULL))
4076: pmf_class_network_register(self, ifp);
4077: else
1.141 jmcneill 4078: aprint_error_dev(self, "couldn't establish power handler\n");
1.172 msaitoh 4079:
1.207 msaitoh 4080: bge_sysctl_init(sc);
1.190 jruoho 4081:
1.172 msaitoh 4082: #ifdef BGE_DEBUG
4083: bge_debug_info(sc);
4084: #endif
1.1 fvdl 4085: }
4086:
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
bge_detach(device_t self, int flags __unused)
{
	struct bge_softc * const sc = device_private(self);
	struct ifnet * const ifp = &sc->ethercom.ec_if;

	/* Stop the interface. Callouts are stopped in it. */
	bge_stop(ifp, 1);

	/* Detach any PHYs before the network interface goes away. */
	mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Detach from the network stack (reverse of attach order). */
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->bge_mii.mii_media);

	/* Finally free interrupt, DMA and register-mapping resources. */
	bge_release_resources(sc);

	return 0;
}
4112:
/*
 * Free the resources acquired during attach: the sysctl tree, event
 * counters, the interrupt handler, jumbo buffer memory, the ring DMA
 * memory and the register/APE space mappings.  Each teardown is
 * guarded, so this is safe to call with partially-initialized state.
 */
static void
bge_release_resources(struct bge_softc *sc)
{

	/* Detach sysctl */
	if (sc->bge_log != NULL)
		sysctl_teardown(&sc->bge_log);

#ifdef BGE_EVENT_COUNTERS
	/* Detach event counters. */
	evcnt_detach(&sc->bge_ev_intr);
	evcnt_detach(&sc->bge_ev_intr_spurious);
	evcnt_detach(&sc->bge_ev_intr_spurious2);
	evcnt_detach(&sc->bge_ev_tx_xoff);
	evcnt_detach(&sc->bge_ev_tx_xon);
	evcnt_detach(&sc->bge_ev_rx_xoff);
	evcnt_detach(&sc->bge_ev_rx_xon);
	evcnt_detach(&sc->bge_ev_rx_macctl);
	evcnt_detach(&sc->bge_ev_xoffentered);
#endif /* BGE_EVENT_COUNTERS */

	/* Disestablish the interrupt handler */
	if (sc->bge_intrhand != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand);
		pci_intr_release(sc->sc_pc, sc->bge_pihp, 1);
		sc->bge_intrhand = NULL;
	}

	/* Free the jumbo-frame buffer area if it was ever allocated. */
	if (sc->bge_cdata.bge_jumbo_buf != NULL)
		bge_free_jumbo_mem(sc);

	/*
	 * Tear down the ring DMA memory in strict reverse order of
	 * creation: unload, destroy map, unmap, then free the segments.
	 */
	if (sc->bge_dmatag != NULL) {
		bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
		bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
		    sc->bge_ring_rseg);
	}

	/* Unmap the device registers */
	if (sc->bge_bsize != 0) {
		bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
		sc->bge_bsize = 0;
	}

	/* Unmap the APE registers */
	if (sc->bge_apesize != 0) {
		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
		    sc->bge_apesize);
		sc->bge_apesize = 0;
	}
}
4166:
/*
 * Perform a global reset of the chip and restore it to a usable state.
 * The numbered "57XX step N" / "5718 reset step N" comments below refer
 * to the reset sequences in Broadcom's programming documentation.
 * Saves and restores critical PCI config state around the reset and
 * waits for the bootcode/firmware to finish re-initialization.
 * Returns 0 on success.
 */
static int
bge_reset(struct bge_softc *sc)
{
	uint32_t cachesize, command;
	uint32_t reset, mac_mode, mac_mode_mask;
	pcireg_t devctl, reg;
	int i, val;
	void (*write_op)(struct bge_softc *, int, int);

	/* Make mask for BGE_MAC_MODE register. */
	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
	/* Keep mac_mode_mask's bits of BGE_MAC_MODE register into mac_mode */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;

	/*
	 * Select how to write BGE_MISC_CFG: newer chips need indirect or
	 * direct memory writes depending on the bus type, older ones use
	 * indirect register writes.
	 */
	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
	    (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
		if (sc->bge_flags & BGEF_PCIE)
			write_op = bge_writemem_direct;
		else
			write_op = bge_writemem_ind;
	} else
		write_op = bge_writereg_ind;

	/* 57XX step 4 */
	/* Acquire the NVM lock */
	if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) {
		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
		/* Poll up to 8000 * 20us = 160ms for the arbiter grant. */
		for (i = 0; i < 8000; i++) {
			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
			    BGE_NVRAMSWARB_GNT1)
				break;
			DELAY(20);
		}
		if (i == 8000) {
			printf("%s: NVRAM lock timedout!\n",
			    device_xname(sc->bge_dev));
		}
	}

	/* Take APE lock when performing reset. */
	bge_ape_lock(sc, BGE_APE_LOCK_GRC);

	/* 57XX step 3 */
	/* Save some important PCI state. */
	cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
	/* 5718 reset step 3 */
	command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);

	/* 5718 reset step 5, 57XX step 5b-5d */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);

	/* XXX ???: Disable fastboot on controllers that support it. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
	    BGE_IS_5755_PLUS(sc))
		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);

	/* 5718 reset step 2, 57XX step 6 */
	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write ~BGE_MAGIC_NUMBER to the same location.
	 */
	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);

	/* 57780: power down the PCIe L1 PLL during the reset. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
		val = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
		val = (val & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
		    | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
		CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, val);
	}

	/* 5718 reset step 6, 57XX step 7 */
	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
	/*
	 * XXX: from FreeBSD/Linux; no documentation
	 */
	if (sc->bge_flags & BGEF_PCIE) {
		if ((BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) &&
		    !BGE_IS_57765_PLUS(sc) &&
		    (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) ==
		    (BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) {
			/* PCI Express 1.0 system */
			CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG,
			    BGE_PHY_PCIE_SCRAM_MODE);
		}
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/*
			 * Prevent PCI Express link training
			 * during global reset.
			 */
			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
			reset |= (1 << 29);
		}
	}

	/* 5906: ask the on-chip VCPU to reset itself and resume. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		i = CSR_READ_4(sc, BGE_VCPU_STATUS);
		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
		    i | BGE_VCPU_STATUS_DRV_RESET);
		i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
	}

	/*
	 * Set GPHY Power Down Override to leave GPHY
	 * powered up in D0 uninitialized.
	 */
	if (BGE_IS_5705_PLUS(sc) &&
	    (sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;

	/* Issue global reset */
	write_op(sc, BGE_MISC_CFG, reset);

	/* 5718 reset step 7, 57XX step 8 */
	if (sc->bge_flags & BGEF_PCIE)
		delay(100*1000); /* too big */
	else
		delay(1000);

	if (sc->bge_flags & BGEF_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			DELAY(500000);
			/* XXX: Magic Numbers */
			reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
			    BGE_PCI_UNKNOWN0);
			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
			    BGE_PCI_UNKNOWN0,
			    reg | (1 << 15));
		}
		devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    sc->bge_pciecap + PCIE_DCSR);
		/* Clear enable no snoop and disable relaxed ordering. */
		devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD |
		    PCIE_DCSR_ENA_NO_SNOOP);

		/* Set PCIE max payload size to 128 for older PCIe devices */
		if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
			devctl &= ~(0x00e0);
		/* Clear device status register. Write 1b to clear */
		devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED
		    | PCIE_DCSR_NFED | PCIE_DCSR_CED;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    sc->bge_pciecap + PCIE_DCSR, devctl);
		bge_set_max_readrq(sc);
	}

	/* From Linux: dummy read to flush PCI posted writes */
	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);

	/*
	 * Reset some of the PCI state that got zapped by reset
	 * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be
	 * set, too.
	 */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
	    (sc->bge_flags & BGEF_PCIX) != 0)
		val |= BGE_PCISTATE_RETRY_SAME_DMA;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);

	/* 57xx step 11: disable PCI-X Relaxed Ordering. */
	if (sc->bge_flags & BGEF_PCIX) {
		reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
		    + PCIX_CMD);
		/* Set max memory read byte count to 2K */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
			reg &= ~PCIX_CMD_BYTECNT_MASK;
			reg |= PCIX_CMD_BCNT_2048;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){
			/*
			 * For 5704, set max outstanding split transaction
			 * field to 0 (0 means it supports 1 request)
			 */
			reg &= ~(PCIX_CMD_SPLTRANS_MASK
			    | PCIX_CMD_BYTECNT_MASK);
			reg |= PCIX_CMD_BCNT_2048;
		}
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
		    + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER);
	}

	/* 5718 reset step 10, 57XX step 12 */
	/* Enable memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* XXX 5721, 5751 and 5752 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
		/* Step 19: */
		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
		/* Step 20: */
		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
	}

	/* 5718 reset step 12, 57XX step 15 and 16 */
	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);

	/* 5718 reset step 13, 57XX step 17 */
	/* Poll until the firmware initialization is complete */
	bge_poll_fw(sc);

	/* 57XX step 21 */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
		pcireg_t msidata;

		msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    BGE_PCI_MSI_DATA);
		msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
		    msidata);
	}

	/* 57XX step 18 */
	/* Write mac mode. */
	val = CSR_READ_4(sc, BGE_MAC_MODE);
	/* Restore mac_mode_mask's bits using mac_mode */
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
	DELAY(40);

	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_flags & BGEF_FIBER_TBI &&
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
		uint32_t serdescfg;

		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	if (sc->bge_flags & BGEF_PCIE &&
	    !BGE_IS_57765_PLUS(sc) &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
		uint32_t v;

		/* Enable PCI Express bug fix */
		v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG);
		CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG,
		    v | BGE_TLP_DATA_FIFO_PROTECT);
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);

	return 0;
}
4442:
/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

static void
bge_rxeof(struct bge_softc *sc)
{
	struct ifnet * const ifp = &sc->ethercom.ec_if;
	uint16_t rx_prod, rx_cons;
	int stdcnt = 0, jumbocnt = 0;
	bus_dmamap_t dmamap;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync;

	/* Pull the status block in from the device to read the producer. */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof(struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	rx_cons = sc->bge_rx_saved_considx;
	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;

	/* Nothing to do */
	if (rx_cons == rx_prod)
		return;

	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
	tosync = rx_prod - rx_cons;

	/* Feed ring activity into the entropy pool. */
	if (tosync != 0)
		rnd_add_uint32(&sc->rnd_source, tosync);

	toff = offset + (rx_cons * sizeof(struct bge_rx_bd));

	/*
	 * tosync < 0 means the producer wrapped past the end of the
	 * return ring: sync the tail segment first, then fall through
	 * to sync from the start of the ring up to the producer.
	 */
	if (tosync < 0) {
		tlen = (sc->bge_return_ring_cnt - rx_cons) *
		    sizeof(struct bge_rx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD);
		tosync = rx_prod;
		toff = offset;
	}

	if (tosync != 0) {
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tosync * sizeof(struct bge_rx_bd),
		    BUS_DMASYNC_POSTREAD);
	}

	/* Walk every new descriptor on the return ring. */
	while (rx_cons != rx_prod) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;

		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];

		rxidx = cur_rx->bge_idx;
		BGE_INC(rx_cons, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			/* Frame came from the jumbo receive ring. */
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			bus_dmamap_sync(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map,
			    mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
			    BGE_JLEN, BUS_DMASYNC_POSTREAD);
			/* On error or buffer shortage, recycle the mbuf. */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				if_statinc(ifp, if_ierrors);
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
			    NULL) == ENOBUFS) {
				if_statinc(ifp, if_ierrors);
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			/* Frame came from the standard receive ring. */
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;

			stdcnt++;
			sc->bge_std_cnt--;

			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmamap);

			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				m_free(m);
				if_statinc(ifp, if_ierrors);
				continue;
			}
		}

#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
		 * the Rx buffer has the layer-2 header unaligned.
		 * If our CPU requires alignment, re-align by copying.
		 */
		if (sc->bge_flags & BGEF_RX_ALIGNBUG) {
			memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif

		/* Hardware-reported length includes the FCS; strip it. */
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m_set_rcvif(m, ifp);

		/* Translate descriptor checksum bits to mbuf csum flags. */
		bge_rxcsum(sc, cur_rx, m);

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG)
			vlan_set_tag(m, cur_rx->bge_vlan_tag);

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Tell the chip how far we consumed, then replenish buffers. */
	sc->bge_rx_saved_considx = rx_cons;
	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		bge_fill_rx_ring_std(sc);
	if (jumbocnt)
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}
4582:
4583: static void
4584: bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
4585: {
1.46 jonathan 4586:
1.257 msaitoh 4587: if (BGE_IS_57765_PLUS(sc)) {
1.219 msaitoh 4588: if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
4589: if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
4590: m->m_pkthdr.csum_flags = M_CSUM_IPv4;
1.216 msaitoh 4591: if ((cur_rx->bge_error_flag &
4592: BGE_RXERRFLAG_IP_CSUM_NOK) != 0)
4593: m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1.219 msaitoh 4594: if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
4595: m->m_pkthdr.csum_data =
4596: cur_rx->bge_tcp_udp_csum;
4597: m->m_pkthdr.csum_flags |=
1.331 msaitoh 4598: (M_CSUM_TCPv4 | M_CSUM_UDPv4 |M_CSUM_DATA);
1.219 msaitoh 4599: }
1.216 msaitoh 4600: }
1.219 msaitoh 4601: } else {
4602: if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
4603: m->m_pkthdr.csum_flags = M_CSUM_IPv4;
4604: if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
4605: m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1.46 jonathan 4606: /*
4607: * Rx transport checksum-offload may also
4608: * have bugs with packets which, when transmitted,
4609: * were `runts' requiring padding.
4610: */
4611: if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
4612: (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
1.219 msaitoh 4613: m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
1.46 jonathan 4614: m->m_pkthdr.csum_data =
4615: cur_rx->bge_tcp_udp_csum;
4616: m->m_pkthdr.csum_flags |=
1.331 msaitoh 4617: (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_DATA);
1.1 fvdl 4618: }
4619: }
4620: }
4621:
/*
 * Transmit completion handling: reclaim descriptors and free mbufs
 * for frames the chip has finished sending, as indicated by the
 * hardware consumer index in the status block.
 */
static void
bge_txeof(struct bge_softc *sc)
{
	struct ifnet * const ifp = &sc->ethercom.ec_if;
	struct bge_tx_bd *cur_tx = NULL;
	struct txdmamap_pool_entry *dma;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync;
	struct mbuf *m;

	/* Pull in the status block to read the TX consumer index. */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof(struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	const uint16_t hw_cons_idx =
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx;
	offset = offsetof(struct bge_ring_data, bge_tx_ring);
	tosync = hw_cons_idx - sc->bge_tx_saved_considx;

	/* Feed ring activity into the entropy pool. */
	if (tosync != 0)
		rnd_add_uint32(&sc->rnd_source, tosync);

	toff = offset + (sc->bge_tx_saved_considx * sizeof(struct bge_tx_bd));

	/*
	 * tosync < 0 means the hardware index wrapped past the end of
	 * the ring: sync the tail segment first, then the head below.
	 */
	if (tosync < 0) {
		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
		    sizeof(struct bge_tx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		tosync = hw_cons_idx;
		toff = offset;
	}

	if (tosync != 0) {
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tosync * sizeof(struct bge_tx_bd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx != hw_cons_idx) {
		uint32_t idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
		/* Count one output packet per final descriptor of a frame. */
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			if_statinc(ifp, if_opackets);
		m = sc->bge_cdata.bge_tx_chain[idx];
		if (m != NULL) {
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
			dma = sc->txdma[idx];
			/* Unload whichever DMA map (32-bit or full) was used. */
			if (dma->is_dma32) {
				bus_dmamap_sync(sc->bge_dmatag32, dma->dmamap32,
				    0, dma->dmamap32->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(
				    sc->bge_dmatag32, dma->dmamap32);
			} else {
				bus_dmamap_sync(sc->bge_dmatag, dma->dmamap,
				    0, dma->dmamap->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
			}
			/* Return the map entry to the free pool. */
			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
			sc->txdma[idx] = NULL;

			m_freem(m);
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		sc->bge_tx_sending = false;
	}
}
4698:
/*
 * Interrupt handler.  Determines whether the interrupt is ours (via
 * the tagged-status protocol or the PCI state register), acknowledges
 * it, services RX/TX rings and link events, then re-enables
 * interrupts.  Returns 1 if the interrupt was serviced, 0 otherwise.
 */
static int
bge_intr(void *xsc)
{
	struct bge_softc * const sc = xsc;
	struct ifnet * const ifp = &sc->ethercom.ec_if;
	uint32_t pcistate, statusword, statustag;
	uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE;

	/* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */
	if (BGE_IS_5717_PLUS(sc))
		intrmask = 0;

	mutex_enter(sc->sc_core_lock);

	/*
	 * It is possible for the interrupt to arrive before
	 * the status block is updated prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	pcistate = CSR_READ_4(sc, BGE_PCI_PCISTATE);

	/* read status word from status block */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof(struct bge_status_block),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	statusword = sc->bge_rdata->bge_status_block.bge_status;
	statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;

	if (sc->bge_flags & BGEF_TAGGED_STATUS) {
		/* Tagged status: an unchanged tag means a spurious interrupt. */
		if (sc->bge_lasttag == statustag &&
		    (~pcistate & intrmask)) {
			BGE_EVCNT_INCR(sc->bge_ev_intr_spurious);
			mutex_exit(sc->sc_core_lock);
			return 0;
		}
		sc->bge_lasttag = statustag;
	} else {
		/* Untagged: rely on the UPDATED flag and the PCI state bit. */
		if (!(statusword & BGE_STATFLAG_UPDATED) &&
		    !(~pcistate & intrmask)) {
			BGE_EVCNT_INCR(sc->bge_ev_intr_spurious2);
			mutex_exit(sc->sc_core_lock);
			return 0;
		}
		statustag = 0;
	}
	/* Ack interrupt and stop others from occurring. */
	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
	BGE_EVCNT_INCR(sc->bge_ev_intr);

	/* clear status word */
	sc->bge_rdata->bge_status_block.bge_status = 0;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof(struct bge_status_block),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Handle link state changes (5700 always polls here). */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
	    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
		bge_link_upd(sc);

	if (sc->bge_if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}

	/* Apply any deferred RX interrupt-coalescing tuning from sysctl. */
	if (sc->bge_pending_rxintr_change) {
		uint32_t rx_ticks = sc->bge_rx_coal_ticks;
		uint32_t rx_bds = sc->bge_rx_max_coal_bds;

		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
		DELAY(10);
		(void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);

		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
		DELAY(10);
		(void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);

		sc->bge_pending_rxintr_change = 0;
	}
	bge_handle_events(sc);

	/* Re-enable interrupts. */
	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, statustag);

	if (sc->bge_if_flags & IFF_RUNNING)
		if_schedule_deferred_start(ifp);

	mutex_exit(sc->sc_core_lock);

	return 1;
}
4797:
1.104 thorpej 4798: static void
1.177 msaitoh 4799: bge_asf_driver_up(struct bge_softc *sc)
4800: {
4801: if (sc->bge_asf_mode & ASF_STACKUP) {
4802: /* Send ASF heartbeat aprox. every 2s */
4803: if (sc->bge_asf_count)
4804: sc->bge_asf_count --;
4805: else {
1.180 msaitoh 4806: sc->bge_asf_count = 2;
1.216 msaitoh 4807:
4808: bge_wait_for_event_ack(sc);
4809:
4810: bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
1.285 msaitoh 4811: BGE_FW_CMD_DRV_ALIVE3);
1.216 msaitoh 4812: bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4813: bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4814: BGE_FW_HB_TIMEOUT_SEC);
4815: CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
4816: CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4817: BGE_RX_CPU_DRV_EVENT);
1.177 msaitoh 4818: }
4819: }
4820: }
4821:
/*
 * Periodic (once per second) housekeeping callout: refresh statistics,
 * poll link state, feed the ASF heartbeat and run the TX watchdog.
 * Reschedules itself unless the watchdog fired or we are detaching.
 */
static void
bge_tick(void *xsc)
{
	struct bge_softc * const sc = xsc;
	struct ifnet * const ifp = &sc->ethercom.ec_if;
	struct mii_data * const mii = &sc->bge_mii;

	mutex_enter(sc->sc_core_lock);

	/* 5705+ chips expose stats via registers, older ones via memory. */
	if (BGE_IS_5705_PLUS(sc))
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);

	if (sc->bge_flags & BGEF_FIBER_TBI) {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register pending link event
		 * and trigger interrupt.
		 */
		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	} else {
		/*
		 * Do not touch PHY if we have link up. This could break
		 * IPMI/ASF mode or produce extra input errors.
		 * (extra input errors was reported for bcm5701 & bcm5704).
		 */
		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
			mii_tick(mii);
	}

	/* Keep the ASF/IPMI firmware heartbeat alive. */
	bge_asf_driver_up(sc);

	/* Watchdog returns false when it has decided to reset the chip. */
	const bool ok = bge_watchdog(ifp);

	if (ok && !sc->bge_detaching)
		callout_schedule(&sc->bge_timeout, hz);

	mutex_exit(sc->sc_core_lock);
}
4863:
1.104 thorpej 4864: static void
1.172 msaitoh 4865: bge_stats_update_regs(struct bge_softc *sc)
4866: {
1.375 skrll 4867: struct ifnet * const ifp = &sc->ethercom.ec_if;
1.172 msaitoh 4868:
1.343 thorpej 4869: net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
4870:
4871: if_statadd_ref(nsr, if_collisions,
4872: CSR_READ_4(sc, BGE_MAC_STATS +
4873: offsetof(struct bge_mac_stats_regs, etherStatsCollisions)));
1.172 msaitoh 4874:
1.320 bouyer 4875: /*
4876: * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0,
4877: * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames
4878: * (silicon bug). There's no reliable workaround so just
4879: * ignore the counter
4880: */
4881: if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
1.328 bouyer 4882: sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4883: sc->bge_chipid != BGE_CHIPID_BCM5720_A0) {
1.343 thorpej 4884: if_statadd_ref(nsr, if_ierrors,
4885: CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
1.320 bouyer 4886: }
1.343 thorpej 4887: if_statadd_ref(nsr, if_ierrors,
4888: CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS));
4889: if_statadd_ref(nsr, if_ierrors,
4890: CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS));
4891:
4892: IF_STAT_PUTREF(ifp);
1.327 msaitoh 4893:
4894: if (sc->bge_flags & BGEF_RDMA_BUG) {
4895: uint32_t val, ucast, mcast, bcast;
4896:
4897: ucast = CSR_READ_4(sc, BGE_MAC_STATS +
4898: offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
4899: mcast = CSR_READ_4(sc, BGE_MAC_STATS +
4900: offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
4901: bcast = CSR_READ_4(sc, BGE_MAC_STATS +
4902: offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
4903:
4904: /*
4905: * If controller transmitted more than BGE_NUM_RDMA_CHANNELS
4906: * frames, it's safe to disable workaround for DMA engine's
4907: * miscalculation of TXMBUF space.
4908: */
4909: if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) {
4910: val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
4911: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
4912: val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
4913: else
4914: val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
4915: CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
4916: sc->bge_flags &= ~BGEF_RDMA_BUG;
4917: }
4918: }
1.172 msaitoh 4919: }
4920:
4921: static void
1.104 thorpej 4922: bge_stats_update(struct bge_softc *sc)
1.1 fvdl 4923: {
1.354 skrll 4924: struct ifnet * const ifp = &sc->ethercom.ec_if;
1.1 fvdl 4925: bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
1.44 hannken 4926:
1.1 fvdl 4927: #define READ_STAT(sc, stats, stat) \
4928: CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4929:
1.343 thorpej 4930: uint64_t collisions =
1.1 fvdl 4931: (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
4932: READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
4933: READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
1.343 thorpej 4934: READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo));
4935:
4936: if_statadd(ifp, if_collisions, collisions - sc->bge_if_collisions);
4937: sc->bge_if_collisions = collisions;
4938:
1.1 fvdl 4939:
1.72 thorpej 4940: BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
4941: READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
4942: BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
4943: READ_STAT(sc, stats, outXonSent.bge_addr_lo));
4944: BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
4945: READ_STAT(sc, stats,
1.330 msaitoh 4946: xoffPauseFramesReceived.bge_addr_lo));
1.72 thorpej 4947: BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
4948: READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
4949: BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
4950: READ_STAT(sc, stats,
1.330 msaitoh 4951: macControlFramesReceived.bge_addr_lo));
1.72 thorpej 4952: BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
4953: READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
4954:
1.1 fvdl 4955: #undef READ_STAT
4956:
4957: #ifdef notdef
4958: ifp->if_collisions +=
4959: (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
4960: sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
4961: sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
4962: sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
4963: ifp->if_collisions;
4964: #endif
4965: }
4966:
1.46 jonathan 4967: /*
4968: * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4969: * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4970: * but when such padded frames employ the bge IP/TCP checksum offload,
4971: * the hardware checksum assist gives incorrect results (possibly
4972: * from incorporating its own padding into the UDP/TCP checksum; who knows).
4973: * If we pad such runts with zeros, the onboard checksum comes out correct.
4974: */
1.102 perry 4975: static inline int
1.46 jonathan 4976: bge_cksum_pad(struct mbuf *pkt)
4977: {
4978: struct mbuf *last = NULL;
4979: int padlen;
4980:
4981: padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
4982:
4983: /* if there's only the packet-header and we can pad there, use it. */
4984: if (pkt->m_pkthdr.len == pkt->m_len &&
1.113 tsutsui 4985: M_TRAILINGSPACE(pkt) >= padlen) {
1.46 jonathan 4986: last = pkt;
4987: } else {
4988: /*
4989: * Walk packet chain to find last mbuf. We will either
1.87 perry 4990: * pad there, or append a new mbuf and pad it
1.46 jonathan 4991: * (thus perhaps avoiding the bcm5700 dma-min bug).
4992: */
4993: for (last = pkt; last->m_next != NULL; last = last->m_next) {
1.367 skrll 4994: continue; /* do nothing */
1.46 jonathan 4995: }
4996:
4997: /* `last' now points to last in chain. */
1.114 tsutsui 4998: if (M_TRAILINGSPACE(last) < padlen) {
1.46 jonathan 4999: /* Allocate new empty mbuf, pad it. Compact later. */
5000: struct mbuf *n;
5001: MGET(n, M_DONTWAIT, MT_DATA);
1.129 joerg 5002: if (n == NULL)
5003: return ENOBUFS;
1.46 jonathan 5004: n->m_len = 0;
5005: last->m_next = n;
5006: last = n;
5007: }
5008: }
5009:
1.114 tsutsui 5010: KDASSERT(!M_READONLY(last));
5011: KDASSERT(M_TRAILINGSPACE(last) >= padlen);
5012:
1.46 jonathan 5013: /* Now zero the pad area, to avoid the bge cksum-assist bug */
1.126 christos 5014: memset(mtod(last, char *) + last->m_len, 0, padlen);
1.46 jonathan 5015: last->m_len += padlen;
5016: pkt->m_pkthdr.len += padlen;
5017: return 0;
5018: }
1.45 jonathan 5019:
5020: /*
5021: * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
5022: */
1.102 perry 5023: static inline int
1.45 jonathan 5024: bge_compact_dma_runt(struct mbuf *pkt)
5025: {
5026: struct mbuf *m, *prev;
1.330 msaitoh 5027: int totlen;
1.45 jonathan 5028:
5029: prev = NULL;
5030: totlen = 0;
5031:
1.331 msaitoh 5032: for (m = pkt; m != NULL; prev = m, m = m->m_next) {
1.45 jonathan 5033: int mlen = m->m_len;
5034: int shortfall = 8 - mlen ;
5035:
5036: totlen += mlen;
1.203 msaitoh 5037: if (mlen == 0)
1.45 jonathan 5038: continue;
5039: if (mlen >= 8)
5040: continue;
5041:
1.357 skrll 5042: /*
5043: * If we get here, mbuf data is too small for DMA engine.
1.45 jonathan 5044: * Try to fix by shuffling data to prev or next in chain.
5045: * If that fails, do a compacting deep-copy of the whole chain.
5046: */
5047:
5048: /* Internal frag. If fits in prev, copy it there. */
1.113 tsutsui 5049: if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
1.330 msaitoh 5050: memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
1.45 jonathan 5051: prev->m_len += mlen;
5052: m->m_len = 0;
5053: /* XXX stitch chain */
5054: prev->m_next = m_free(m);
5055: m = prev;
5056: continue;
1.332 msaitoh 5057: } else if (m->m_next != NULL &&
1.367 skrll 5058: M_TRAILINGSPACE(m) >= shortfall &&
5059: m->m_next->m_len >= (8 + shortfall)) {
1.45 jonathan 5060: /* m is writable and have enough data in next, pull up. */
5061:
1.330 msaitoh 5062: memcpy(m->m_data + m->m_len, m->m_next->m_data,
1.115 tsutsui 5063: shortfall);
1.45 jonathan 5064: m->m_len += shortfall;
5065: m->m_next->m_len -= shortfall;
5066: m->m_next->m_data += shortfall;
1.332 msaitoh 5067: } else if (m->m_next == NULL || 1) {
1.357 skrll 5068: /*
5069: * Got a runt at the very end of the packet.
1.45 jonathan 5070: * borrow data from the tail of the preceding mbuf and
1.332 msaitoh 5071: * update its length in-place. (The original data is
5072: * still valid, so we can do this even if prev is not
5073: * writable.)
1.45 jonathan 5074: */
5075:
1.332 msaitoh 5076: /*
5077: * If we'd make prev a runt, just move all of its data.
5078: */
1.45 jonathan 5079: KASSERT(prev != NULL /*, ("runt but null PREV")*/);
5080: KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
1.111 christos 5081:
1.45 jonathan 5082: if ((prev->m_len - shortfall) < 8)
5083: shortfall = prev->m_len;
1.87 perry 5084:
1.45 jonathan 5085: #ifdef notyet /* just do the safe slow thing for now */
5086: if (!M_READONLY(m)) {
5087: if (M_LEADINGSPACE(m) < shorfall) {
5088: void *m_dat;
1.338 maxv 5089: m_dat = M_BUFADDR(m);
1.332 msaitoh 5090: memmove(m_dat, mtod(m, void*),
5091: m->m_len);
1.45 jonathan 5092: m->m_data = m_dat;
1.332 msaitoh 5093: }
1.45 jonathan 5094: } else
5095: #endif /* just do the safe slow thing */
5096: {
5097: struct mbuf * n = NULL;
5098: int newprevlen = prev->m_len - shortfall;
5099:
5100: MGET(n, M_NOWAIT, MT_DATA);
5101: if (n == NULL)
5102: return ENOBUFS;
5103: KASSERT(m->m_len + shortfall < MLEN
5104: /*,
5105: ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
5106:
5107: /* first copy the data we're stealing from prev */
1.115 tsutsui 5108: memcpy(n->m_data, prev->m_data + newprevlen,
5109: shortfall);
1.45 jonathan 5110:
5111: /* update prev->m_len accordingly */
5112: prev->m_len -= shortfall;
5113:
5114: /* copy data from runt m */
1.115 tsutsui 5115: memcpy(n->m_data + shortfall, m->m_data,
5116: m->m_len);
1.45 jonathan 5117:
5118: /* n holds what we stole from prev, plus m */
5119: n->m_len = shortfall + m->m_len;
5120:
5121: /* stitch n into chain and free m */
5122: n->m_next = m->m_next;
5123: prev->m_next = n;
5124: /* KASSERT(m->m_next == NULL); */
5125: m->m_next = NULL;
5126: m_free(m);
5127: m = n; /* for continuing loop */
5128: }
5129: }
5130: }
5131: return 0;
5132: }
5133:
1.1 fvdl 5134: /*
1.207 msaitoh 5135: * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
1.1 fvdl 5136: * pointers to descriptors.
5137: */
1.104 thorpej 5138: static int
1.170 msaitoh 5139: bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
1.1 fvdl 5140: {
1.317 bouyer 5141: struct bge_tx_bd *f, *prev_f;
1.170 msaitoh 5142: uint32_t frag, cur;
5143: uint16_t csum_flags = 0;
5144: uint16_t txbd_tso_flags = 0;
1.1 fvdl 5145: struct txdmamap_pool_entry *dma;
5146: bus_dmamap_t dmamap;
1.317 bouyer 5147: bus_dma_tag_t dmatag;
1.1 fvdl 5148: int i = 0;
1.95 jonathan 5149: int use_tso, maxsegsize, error;
1.311 knakahar 5150: bool have_vtag;
5151: uint16_t vtag;
1.330 msaitoh 5152: bool remap;
1.107 blymn 5153:
1.1 fvdl 5154: if (m_head->m_pkthdr.csum_flags) {
5155: if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
5156: csum_flags |= BGE_TXBDFLAG_IP_CSUM;
1.331 msaitoh 5157: if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4 |M_CSUM_UDPv4))
1.1 fvdl 5158: csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
5159: }
5160:
1.87 perry 5161: /*
1.46 jonathan 5162: * If we were asked to do an outboard checksum, and the NIC
5163: * has the bug where it sometimes adds in the Ethernet padding,
5164: * explicitly pad with zeros so the cksum will be correct either way.
5165: * (For now, do this for all chip versions, until newer
5166: * are confirmed to not require the workaround.)
5167: */
5168: if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
5169: #ifdef notyet
5170: (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
1.87 perry 5171: #endif
1.46 jonathan 5172: m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
5173: goto check_dma_bug;
5174:
1.170 msaitoh 5175: if (bge_cksum_pad(m_head) != 0)
1.320 bouyer 5176: return ENOBUFS;
1.46 jonathan 5177:
5178: check_dma_bug:
1.157 msaitoh 5179: if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
1.29 itojun 5180: goto doit;
1.157 msaitoh 5181:
1.25 jonathan 5182: /*
5183: * bcm5700 Revision B silicon cannot handle DMA descriptors with
1.87 perry 5184: * less than eight bytes. If we encounter a teeny mbuf
1.25 jonathan 5185: * at the end of a chain, we can pad. Otherwise, copy.
5186: */
1.45 jonathan 5187: if (bge_compact_dma_runt(m_head) != 0)
5188: return ENOBUFS;
1.25 jonathan 5189:
5190: doit:
1.1 fvdl 5191: dma = SLIST_FIRST(&sc->txdma_list);
1.320 bouyer 5192: if (dma == NULL) {
1.1 fvdl 5193: return ENOBUFS;
1.320 bouyer 5194: }
1.1 fvdl 5195: dmamap = dma->dmamap;
1.317 bouyer 5196: dmatag = sc->bge_dmatag;
5197: dma->is_dma32 = false;
1.1 fvdl 5198:
5199: /*
1.95 jonathan 5200: * Set up any necessary TSO state before we start packing...
5201: */
5202: use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5203: if (!use_tso) {
5204: maxsegsize = 0;
5205: } else { /* TSO setup */
5206: unsigned mss;
5207: struct ether_header *eh;
5208: unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
1.317 bouyer 5209: unsigned bge_hlen;
1.95 jonathan 5210: struct mbuf * m0 = m_head;
5211: struct ip *ip;
5212: struct tcphdr *th;
5213: int iphl, hlen;
5214:
5215: /*
5216: * XXX It would be nice if the mbuf pkthdr had offset
5217: * fields for the protocol headers.
5218: */
5219:
5220: eh = mtod(m0, struct ether_header *);
5221: switch (htons(eh->ether_type)) {
5222: case ETHERTYPE_IP:
5223: offset = ETHER_HDR_LEN;
5224: break;
5225:
5226: case ETHERTYPE_VLAN:
5227: offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5228: break;
5229:
5230: default:
5231: /*
5232: * Don't support this protocol or encapsulation.
5233: */
1.170 msaitoh 5234: return ENOBUFS;
1.95 jonathan 5235: }
5236:
5237: /*
5238: * TCP/IP headers are in the first mbuf; we can do
5239: * this the easy way.
5240: */
5241: iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5242: hlen = iphl + offset;
5243: if (__predict_false(m0->m_len <
5244: (hlen + sizeof(struct tcphdr)))) {
5245:
1.316 bouyer 5246: aprint_error_dev(sc->bge_dev,
1.138 joerg 5247: "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd,"
5248: "not handled yet\n",
1.367 skrll 5249: m0->m_len, hlen+ sizeof(struct tcphdr));
1.95 jonathan 5250: #ifdef NOTYET
5251: /*
5252: * XXX jonathan@NetBSD.org: untested.
1.330 msaitoh 5253: * how to force this branch to be taken?
1.95 jonathan 5254: */
1.267 msaitoh 5255: BGE_EVCNT_INCR(sc->bge_ev_txtsopain);
1.95 jonathan 5256:
5257: m_copydata(m0, offset, sizeof(ip), &ip);
5258: m_copydata(m0, hlen, sizeof(th), &th);
5259:
5260: ip.ip_len = 0;
5261:
5262: m_copyback(m0, hlen + offsetof(struct ip, ip_len),
5263: sizeof(ip.ip_len), &ip.ip_len);
5264:
5265: th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5266: ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5267:
5268: m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5269: sizeof(th.th_sum), &th.th_sum);
5270:
5271: hlen += th.th_off << 2;
5272: iptcp_opt_words = hlen;
5273: #else
5274: /*
5275: * if_wm "hard" case not yet supported, can we not
5276: * mandate it out of existence?
5277: */
5278: (void) ip; (void)th; (void) ip_tcp_hlen;
5279:
5280: return ENOBUFS;
5281: #endif
5282: } else {
1.126 christos 5283: ip = (struct ip *) (mtod(m0, char *) + offset);
5284: th = (struct tcphdr *) (mtod(m0, char *) + hlen);
1.95 jonathan 5285: ip_tcp_hlen = iphl + (th->th_off << 2);
5286:
5287: /* Total IP/TCP options, in 32-bit words */
5288: iptcp_opt_words = (ip_tcp_hlen
5289: - sizeof(struct tcphdr)
5290: - sizeof(struct ip)) >> 2;
5291: }
1.207 msaitoh 5292: if (BGE_IS_575X_PLUS(sc)) {
1.95 jonathan 5293: th->th_sum = 0;
1.317 bouyer 5294: csum_flags = 0;
1.95 jonathan 5295: } else {
5296: /*
1.107 blymn 5297: * XXX jonathan@NetBSD.org: 5705 untested.
1.95 jonathan 5298: * Requires TSO firmware patch for 5701/5703/5704.
5299: */
5300: th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5301: ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5302: }
5303:
5304: mss = m_head->m_pkthdr.segsz;
1.107 blymn 5305: txbd_tso_flags |=
1.95 jonathan 5306: BGE_TXBDFLAG_CPU_PRE_DMA |
5307: BGE_TXBDFLAG_CPU_POST_DMA;
5308:
5309: /*
5310: * Our NIC TSO-assist assumes TSO has standard, optionless
5311: * IPv4 and TCP headers, which total 40 bytes. By default,
5312: * the NIC copies 40 bytes of IP/TCP header from the
5313: * supplied header into the IP/TCP header portion of
5314: * each post-TSO-segment. If the supplied packet has IP or
5315: * TCP options, we need to tell the NIC to copy those extra
5316: * bytes into each post-TSO header, in addition to the normal
5317: * 40-byte IP/TCP header (and to leave space accordingly).
5318: * Unfortunately, the driver encoding of option length
5319: * varies across different ASIC families.
5320: */
5321: tcp_seg_flags = 0;
1.317 bouyer 5322: bge_hlen = ip_tcp_hlen >> 2;
5323: if (BGE_IS_5717_PLUS(sc)) {
5324: tcp_seg_flags = (bge_hlen & 0x3) << 14;
5325: txbd_tso_flags |=
5326: ((bge_hlen & 0xF8) << 7) | ((bge_hlen & 0x4) << 2);
5327: } else if (BGE_IS_5705_PLUS(sc)) {
1.332 msaitoh 5328: tcp_seg_flags = bge_hlen << 11;
1.317 bouyer 5329: } else {
5330: /* XXX iptcp_opt_words or bge_hlen ? */
1.332 msaitoh 5331: txbd_tso_flags |= iptcp_opt_words << 12;
1.95 jonathan 5332: }
5333: maxsegsize = mss | tcp_seg_flags;
5334: ip->ip_len = htons(mss + ip_tcp_hlen);
1.317 bouyer 5335: ip->ip_sum = 0;
1.95 jonathan 5336:
5337: } /* TSO setup */
5338:
1.317 bouyer 5339: have_vtag = vlan_has_tag(m_head);
5340: if (have_vtag)
5341: vtag = vlan_get_tag(m_head);
5342:
1.95 jonathan 5343: /*
1.1 fvdl 5344: * Start packing the mbufs in this chain into
5345: * the fragment pointers. Stop when we run out
5346: * of fragments or hit the end of the mbuf chain.
5347: */
1.320 bouyer 5348: remap = true;
1.317 bouyer 5349: load_again:
1.332 msaitoh 5350: error = bus_dmamap_load_mbuf(dmatag, dmamap, m_head, BUS_DMA_NOWAIT);
1.320 bouyer 5351: if (__predict_false(error)) {
1.332 msaitoh 5352: if (error == EFBIG && remap) {
1.320 bouyer 5353: struct mbuf *m;
5354: remap = false;
5355: m = m_defrag(m_head, M_NOWAIT);
5356: if (m != NULL) {
5357: KASSERT(m == m_head);
5358: goto load_again;
5359: }
5360: }
5361: return error;
5362: }
1.118 tsutsui 5363: /*
5364: * Sanity check: avoid coming within 16 descriptors
5365: * of the end of the ring.
5366: */
5367: if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
5368: BGE_TSO_PRINTF(("%s: "
5369: " dmamap_load_mbuf too close to ring wrap\n",
1.138 joerg 5370: device_xname(sc->bge_dev)));
1.118 tsutsui 5371: goto fail_unload;
5372: }
1.95 jonathan 5373:
1.317 bouyer 5374: /* Iterate over dmap-map fragments. */
5375: f = prev_f = NULL;
5376: cur = frag = *txidx;
1.6 thorpej 5377:
1.1 fvdl 5378: for (i = 0; i < dmamap->dm_nsegs; i++) {
5379: f = &sc->bge_rdata->bge_tx_ring[frag];
5380: if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
5381: break;
1.107 blymn 5382:
1.172 msaitoh 5383: BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
1.1 fvdl 5384: f->bge_len = dmamap->dm_segs[i].ds_len;
1.320 bouyer 5385: if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && (
5386: (dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) !=
5387: ((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) ||
5388: (prev_f != NULL &&
5389: prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi))
5390: ) {
1.317 bouyer 5391: /*
5392: * watchdog timeout issue was observed with TSO,
5393: * limiting DMA address space to 32bits seems to
5394: * address the issue.
5395: */
5396: bus_dmamap_unload(dmatag, dmamap);
5397: dmatag = sc->bge_dmatag32;
5398: dmamap = dma->dmamap32;
5399: dma->is_dma32 = true;
1.320 bouyer 5400: remap = true;
1.317 bouyer 5401: goto load_again;
5402: }
1.95 jonathan 5403:
5404: /*
5405: * For 5751 and follow-ons, for TSO we must turn
5406: * off checksum-assist flag in the tx-descr, and
5407: * supply the ASIC-revision-specific encoding
5408: * of TSO flags and segsize.
5409: */
5410: if (use_tso) {
1.207 msaitoh 5411: if (BGE_IS_575X_PLUS(sc) || i == 0) {
1.95 jonathan 5412: f->bge_rsvd = maxsegsize;
5413: f->bge_flags = csum_flags | txbd_tso_flags;
5414: } else {
5415: f->bge_rsvd = 0;
5416: f->bge_flags =
5417: (csum_flags | txbd_tso_flags) & 0x0fff;
5418: }
5419: } else {
5420: f->bge_rsvd = 0;
5421: f->bge_flags = csum_flags;
5422: }
1.1 fvdl 5423:
1.311 knakahar 5424: if (have_vtag) {
1.1 fvdl 5425: f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
1.311 knakahar 5426: f->bge_vlan_tag = vtag;
1.1 fvdl 5427: } else {
5428: f->bge_vlan_tag = 0;
5429: }
1.317 bouyer 5430: prev_f = f;
1.1 fvdl 5431: cur = frag;
5432: BGE_INC(frag, BGE_TX_RING_CNT);
5433: }
5434:
1.95 jonathan 5435: if (i < dmamap->dm_nsegs) {
5436: BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
1.138 joerg 5437: device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
1.118 tsutsui 5438: goto fail_unload;
1.95 jonathan 5439: }
1.1 fvdl 5440:
1.317 bouyer 5441: bus_dmamap_sync(dmatag, dmamap, 0, dmamap->dm_mapsize,
1.1 fvdl 5442: BUS_DMASYNC_PREWRITE);
5443:
1.95 jonathan 5444: if (frag == sc->bge_tx_saved_considx) {
5445: BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
1.138 joerg 5446: device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
1.95 jonathan 5447:
1.118 tsutsui 5448: goto fail_unload;
1.95 jonathan 5449: }
1.1 fvdl 5450:
5451: sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
5452: sc->bge_cdata.bge_tx_chain[cur] = m_head;
5453: SLIST_REMOVE_HEAD(&sc->txdma_list, link);
5454: sc->txdma[cur] = dma;
1.118 tsutsui 5455: sc->bge_txcnt += dmamap->dm_nsegs;
1.1 fvdl 5456:
5457: *txidx = frag;
5458:
1.170 msaitoh 5459: return 0;
1.118 tsutsui 5460:
1.158 msaitoh 5461: fail_unload:
1.317 bouyer 5462: bus_dmamap_unload(dmatag, dmamap);
1.118 tsutsui 5463:
5464: return ENOBUFS;
1.1 fvdl 5465: }
5466:
1.375 skrll 5467:
5468: static void
5469: bge_start(struct ifnet *ifp)
5470: {
5471: struct bge_softc * const sc = ifp->if_softc;
5472:
5473: mutex_enter(sc->sc_core_lock);
5474: bge_start_locked(ifp);
5475: mutex_exit(sc->sc_core_lock);
5476: }
5477:
1.1 fvdl 5478: /*
5479: * Main transmit routine. To avoid having to do mbuf copies, we put pointers
5480: * to the mbuf data regions directly in the transmit descriptors.
5481: */
1.104 thorpej 5482: static void
1.375 skrll 5483: bge_start_locked(struct ifnet *ifp)
1.1 fvdl 5484: {
1.354 skrll 5485: struct bge_softc * const sc = ifp->if_softc;
1.1 fvdl 5486: struct mbuf *m_head = NULL;
1.320 bouyer 5487: struct mbuf *m;
1.170 msaitoh 5488: uint32_t prodidx;
1.1 fvdl 5489: int pkts = 0;
1.320 bouyer 5490: int error;
1.1 fvdl 5491:
1.375 skrll 5492: if ((sc->bge_if_flags & IFF_RUNNING) != IFF_RUNNING)
1.1 fvdl 5493: return;
5494:
1.94 jonathan 5495: prodidx = sc->bge_tx_prodidx;
1.1 fvdl 5496:
1.170 msaitoh 5497: while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
1.1 fvdl 5498: IFQ_POLL(&ifp->if_snd, m_head);
5499: if (m_head == NULL)
5500: break;
5501:
5502: #if 0
5503: /*
5504: * XXX
5505: * safety overkill. If this is a fragmented packet chain
5506: * with delayed TCP/UDP checksums, then only encapsulate
5507: * it if we have enough descriptors to handle the entire
5508: * chain at once.
5509: * (paranoia -- may not actually be needed)
5510: */
5511: if (m_head->m_flags & M_FIRSTFRAG &&
5512: m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
5513: if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
1.86 thorpej 5514: M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
1.1 fvdl 5515: ifp->if_flags |= IFF_OACTIVE;
5516: break;
5517: }
5518: }
5519: #endif
5520:
5521: /*
5522: * Pack the data into the transmit ring. If we
5523: * don't have room, set the OACTIVE flag and wait
5524: * for the NIC to drain the ring.
5525: */
1.320 bouyer 5526: error = bge_encap(sc, m_head, &prodidx);
5527: if (__predict_false(error)) {
1.375 skrll 5528: if (SLIST_EMPTY(&sc->txdma_list)) {
1.320 bouyer 5529: /* just wait for the transmit ring to drain */
5530: break;
5531: }
5532: IFQ_DEQUEUE(&ifp->if_snd, m);
5533: KASSERT(m == m_head);
5534: m_freem(m_head);
5535: continue;
1.1 fvdl 5536: }
1.330 msaitoh 5537:
1.1 fvdl 5538: /* now we are committed to transmit the packet */
1.320 bouyer 5539: IFQ_DEQUEUE(&ifp->if_snd, m);
5540: KASSERT(m == m_head);
1.1 fvdl 5541: pkts++;
5542:
5543: /*
5544: * If there's a BPF listener, bounce a copy of this frame
5545: * to him.
5546: */
1.314 msaitoh 5547: bpf_mtap(ifp, m_head, BPF_D_OUT);
1.1 fvdl 5548: }
5549: if (pkts == 0)
5550: return;
5551:
5552: /* Transmit */
1.151 cegger 5553: bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
1.158 msaitoh 5554: /* 5700 b2 errata */
5555: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1.151 cegger 5556: bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
1.1 fvdl 5557:
1.94 jonathan 5558: sc->bge_tx_prodidx = prodidx;
1.375 skrll 5559: sc->bge_tx_lastsent = time_uptime;
5560: sc->bge_tx_sending = true;
5561: }
1.94 jonathan 5562:
1.375 skrll 5563: static int
5564: bge_init(struct ifnet *ifp)
5565: {
5566: struct bge_softc * const sc = ifp->if_softc;
5567:
5568: mutex_enter(sc->sc_core_lock);
5569: int ret = bge_init_locked(ifp);
5570: mutex_exit(sc->sc_core_lock);
5571:
5572: return ret;
1.1 fvdl 5573: }
5574:
1.375 skrll 5575:
1.104 thorpej 5576: static int
1.375 skrll 5577: bge_init_locked(struct ifnet *ifp)
1.1 fvdl 5578: {
1.354 skrll 5579: struct bge_softc * const sc = ifp->if_softc;
1.170 msaitoh 5580: const uint16_t *m;
1.258 msaitoh 5581: uint32_t mode, reg;
1.375 skrll 5582: int error = 0;
1.1 fvdl 5583:
1.375 skrll 5584: KASSERT(IFNET_LOCKED(ifp));
5585: KASSERT(mutex_owned(sc->sc_core_lock));
1.358 skrll 5586: KASSERT(ifp == &sc->ethercom.ec_if);
1.1 fvdl 5587:
5588: /* Cancel pending I/O and flush buffers. */
1.375 skrll 5589: bge_stop_locked(ifp, 0);
1.177 msaitoh 5590:
5591: bge_stop_fw(sc);
5592: bge_sig_pre_reset(sc, BGE_RESET_START);
1.1 fvdl 5593: bge_reset(sc);
1.177 msaitoh 5594: bge_sig_legacy(sc, BGE_RESET_START);
1.287 msaitoh 5595:
5596: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
5597: reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
5598: reg &= ~(BGE_CPMU_CTRL_LINK_AWARE_MODE |
5599: BGE_CPMU_CTRL_LINK_IDLE_MODE);
5600: CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
5601:
5602: reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
5603: reg &= ~BGE_CPMU_LSPD_10MB_CLK;
5604: reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
5605: CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
5606:
5607: reg = CSR_READ_4(sc, BGE_CPMU_LNK_AWARE_PWRMD);
5608: reg &= ~BGE_CPMU_LNK_AWARE_MACCLK_MASK;
5609: reg |= BGE_CPMU_LNK_AWARE_MACCLK_6_25;
5610: CSR_WRITE_4(sc, BGE_CPMU_LNK_AWARE_PWRMD, reg);
5611:
5612: reg = CSR_READ_4(sc, BGE_CPMU_HST_ACC);
5613: reg &= ~BGE_CPMU_HST_ACC_MACCLK_MASK;
5614: reg |= BGE_CPMU_HST_ACC_MACCLK_6_25;
5615: CSR_WRITE_4(sc, BGE_CPMU_HST_ACC, reg);
5616: }
5617:
1.304 msaitoh 5618: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
1.305 msaitoh 5619: pcireg_t aercap;
5620:
1.304 msaitoh 5621: reg = CSR_READ_4(sc, BGE_PCIE_PWRMNG_THRESH);
5622: reg = (reg & ~BGE_PCIE_PWRMNG_L1THRESH_MASK)
5623: | BGE_PCIE_PWRMNG_L1THRESH_4MS
5624: | BGE_PCIE_PWRMNG_EXTASPMTMR_EN;
5625: CSR_WRITE_4(sc, BGE_PCIE_PWRMNG_THRESH, reg);
5626:
5627: reg = CSR_READ_4(sc, BGE_PCIE_EIDLE_DELAY);
5628: reg = (reg & ~BGE_PCIE_EIDLE_DELAY_MASK)
5629: | BGE_PCIE_EIDLE_DELAY_13CLK;
5630: CSR_WRITE_4(sc, BGE_PCIE_EIDLE_DELAY, reg);
5631:
1.305 msaitoh 5632: /* Clear correctable error */
5633: if (pci_get_ext_capability(sc->sc_pc, sc->sc_pcitag,
5634: PCI_EXTCAP_AER, &aercap, NULL) != 0)
5635: pci_conf_write(sc->sc_pc, sc->sc_pcitag,
5636: aercap + PCI_AER_COR_STATUS, 0xffffffff);
1.304 msaitoh 5637:
5638: reg = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
5639: reg = (reg & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
5640: | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
5641: CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, reg);
5642: }
5643:
1.177 msaitoh 5644: bge_sig_post_reset(sc, BGE_RESET_START);
5645:
1.1 fvdl 5646: bge_chipinit(sc);
5647:
5648: /*
5649: * Init the various state machines, ring
5650: * control blocks and firmware.
5651: */
5652: error = bge_blockinit(sc);
5653: if (error != 0) {
1.138 joerg 5654: aprint_error_dev(sc->bge_dev, "initialization error %d\n",
1.1 fvdl 5655: error);
5656: return error;
5657: }
5658:
1.236 msaitoh 5659: /* 5718 step 25, 57XX step 54 */
1.1 fvdl 5660: /* Specify MTU. */
5661: CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
1.107 blymn 5662: ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
1.1 fvdl 5663:
1.236 msaitoh 5664: /* 5718 step 23 */
1.1 fvdl 5665: /* Load our MAC address. */
1.170 msaitoh 5666: m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
1.1 fvdl 5667: CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
1.336 msaitoh 5668: CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI,
5669: ((uint32_t)htons(m[1]) << 16) | htons(m[2]));
1.1 fvdl 5670:
5671: /* Enable or disable promiscuous mode as needed. */
1.378 ! skrll 5672: if (ifp->if_flags & IFF_PROMISC)
1.1 fvdl 5673: BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1.178 msaitoh 5674: else
1.1 fvdl 5675: BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5676:
5677: /* Program multicast filter. */
5678: bge_setmulti(sc);
5679:
5680: /* Init RX ring. */
5681: bge_init_rx_ring_std(sc);
5682:
1.161 msaitoh 5683: /*
5684: * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
5685: * memory to insure that the chip has in fact read the first
5686: * entry of the ring.
5687: */
5688: if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
1.372 skrll 5689: u_int i;
1.161 msaitoh 5690: for (i = 0; i < 10; i++) {
5691: DELAY(20);
1.372 skrll 5692: uint32_t v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
1.161 msaitoh 5693: if (v == (MCLBYTES - ETHER_ALIGN))
5694: break;
5695: }
5696: if (i == 10)
5697: aprint_error_dev(sc->bge_dev,
5698: "5705 A0 chip failed to load RX ring\n");
5699: }
5700:
1.1 fvdl 5701: /* Init jumbo RX ring. */
5702: if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
5703: bge_init_rx_ring_jumbo(sc);
5704:
5705: /* Init our RX return ring index */
5706: sc->bge_rx_saved_considx = 0;
5707:
5708: /* Init TX ring. */
5709: bge_init_tx_ring(sc);
5710:
1.236 msaitoh 5711: /* 5718 step 63, 57XX step 94 */
1.206 msaitoh 5712: /* Enable TX MAC state machine lockup fix. */
5713: mode = CSR_READ_4(sc, BGE_TX_MODE);
5714: if (BGE_IS_5755_PLUS(sc) ||
5715: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
5716: mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
1.327 msaitoh 5717: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
5718: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1.216 msaitoh 5719: mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5720: mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5721: (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5722: }
1.206 msaitoh 5723:
1.1 fvdl 5724: /* Turn on transmitter */
1.211 msaitoh 5725: CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
1.236 msaitoh 5726: /* 5718 step 64 */
1.206 msaitoh 5727: DELAY(100);
1.1 fvdl 5728:
1.236 msaitoh 5729: /* 5718 step 65, 57XX step 95 */
1.1 fvdl 5730: /* Turn on receiver */
1.216 msaitoh 5731: mode = CSR_READ_4(sc, BGE_RX_MODE);
5732: if (BGE_IS_5755_PLUS(sc))
5733: mode |= BGE_RXMODE_IPV6_ENABLE;
1.327 msaitoh 5734: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
5735: mode |= BGE_RXMODE_IPV4_FRAG_FIX;
1.216 msaitoh 5736: CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
1.236 msaitoh 5737: /* 5718 step 66 */
1.206 msaitoh 5738: DELAY(10);
1.1 fvdl 5739:
1.258 msaitoh 5740: /* 5718 step 12, 57XX step 37 */
5741: /*
 5742: 	 * XXX Documents for the 5718 series and 577xx say the recommended
 5743: 	 * value is 1, but tg3 sets 1 only on the 57765 series.
5744: */
5745: if (BGE_IS_57765_PLUS(sc))
5746: reg = 1;
5747: else
5748: reg = 2;
5749: CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg);
1.71 thorpej 5750:
1.1 fvdl 5751: /* Tell firmware we're alive. */
5752: BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5753:
5754: /* Enable host interrupts. */
1.226 msaitoh 5755: BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5756: BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
1.211 msaitoh 5757: bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0);
1.1 fvdl 5758:
1.142 dyoung 5759: if ((error = bge_ifmedia_upd(ifp)) != 0)
5760: goto out;
1.1 fvdl 5761:
1.375 skrll 5762: /* IFNET_LOCKED asserted above */
1.1 fvdl 5763: ifp->if_flags |= IFF_RUNNING;
5764:
1.345 thorpej 5765: callout_schedule(&sc->bge_timeout, hz);
1.142 dyoung 5766:
5767: out:
1.186 msaitoh 5768: sc->bge_if_flags = ifp->if_flags;
1.1 fvdl 5769:
1.142 dyoung 5770: return error;
1.1 fvdl 5771: }
5772:
5773: /*
5774: * Set media options.
5775: */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->bge_mii;
	struct ifmedia * const ifm = &sc->bge_ifmedia;
	int rc;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGEF_FIBER_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return EINVAL;
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					/*
					 * Advertise autoneg plus symmetric
					 * and asymmetric pause, pulse the
					 * SEND bit to latch the new config.
					 */
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
					    sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			/* Fixed 1000BASE-SX: just set the duplex bit. */
			if ((ifm->ifm_media & IFM_FDX) != 0) {
				BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT_FLUSH(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return EINVAL;
		}
		/* XXX 802.3x flow control for 1000BASE-SX */
		return 0;
	}

	/* Copper path below. */
	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784) &&
	    (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5784_AX)) {
		uint32_t reg;

		/* Clear the GPHY 10MB receive-only mode if it is set. */
		reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
		if ((reg & BGE_CPMU_CTRL_GPHY_10MB_RXONLY) != 0) {
			reg &= ~BGE_CPMU_CTRL_GPHY_10MB_RXONLY;
			CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
		}
	}

	/* Mark a link event pending; bge_link_upd() clears it. */
	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
	if ((rc = mii_mediachg(mii)) == ENXIO)
		/* ENXIO from the MII layer is not treated as an error. */
		return 0;

	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
		uint32_t reg;

		/* If the MAC clock is at 12.5MHz, clear that selection. */
		reg = CSR_READ_4(sc, BGE_CPMU_LSPD_1000MB_CLK);
		if ((reg & BGE_CPMU_LSPD_1000MB_MACCLK_MASK)
		    == (BGE_CPMU_LSPD_1000MB_MACCLK_12_5)) {
			reg &= ~BGE_CPMU_LSPD_1000MB_MACCLK_MASK;
			delay(40);
			CSR_WRITE_4(sc, BGE_CPMU_LSPD_1000MB_CLK, reg);
		}
	}

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we are not getting any further interrupts
	 * for link state changes and thus will not UP the link and
	 * not be able to send in bge_start. The only way to get
	 * things working was to receive a packet and get a RX intr.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGEF_IS_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return rc;
}
5872:
5873: /*
5874: * Report current media status.
5875: */
1.104 thorpej 5876: static void
5877: bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1.1 fvdl 5878: {
1.354 skrll 5879: struct bge_softc * const sc = ifp->if_softc;
5880: struct mii_data * const mii = &sc->bge_mii;
1.1 fvdl 5881:
1.261 msaitoh 5882: if (sc->bge_flags & BGEF_FIBER_TBI) {
1.1 fvdl 5883: ifmr->ifm_status = IFM_AVALID;
5884: ifmr->ifm_active = IFM_ETHER;
5885: if (CSR_READ_4(sc, BGE_MAC_STS) &
5886: BGE_MACSTAT_TBI_PCS_SYNCHED)
5887: ifmr->ifm_status |= IFM_ACTIVE;
5888: ifmr->ifm_active |= IFM_1000_SX;
5889: if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5890: ifmr->ifm_active |= IFM_HDX;
5891: else
5892: ifmr->ifm_active |= IFM_FDX;
5893: return;
5894: }
5895:
5896: mii_pollstat(mii);
5897: ifmr->ifm_status = mii->mii_media_status;
1.69 thorpej 5898: ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
5899: sc->bge_flowflags;
1.1 fvdl 5900: }
5901:
1.104 thorpej 5902: static int
1.186 msaitoh 5903: bge_ifflags_cb(struct ethercom *ec)
5904: {
1.354 skrll 5905: struct ifnet * const ifp = &ec->ec_if;
5906: struct bge_softc * const sc = ifp->if_softc;
1.375 skrll 5907: int ret = 0;
5908:
5909: KASSERT(IFNET_LOCKED(ifp));
5910: mutex_enter(sc->sc_core_lock);
5911:
1.337 msaitoh 5912: u_short change = ifp->if_flags ^ sc->bge_if_flags;
1.186 msaitoh 5913:
1.375 skrll 5914: if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
5915: ret = ENETRESET;
5916: } else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
5917: if ((ifp->if_flags & IFF_PROMISC) == 0)
5918: BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5919: else
5920: BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1.186 msaitoh 5921:
1.375 skrll 5922: bge_setmulti(sc);
5923: }
1.186 msaitoh 5924:
1.375 skrll 5925: sc->bge_if_flags = ifp->if_flags;
5926: mutex_exit(sc->sc_core_lock);
1.186 msaitoh 5927:
1.375 skrll 5928: return ret;
1.186 msaitoh 5929: }
5930:
/*
 * ioctl entry point.  SIOCSIFMEDIA is handled here so the flow control
 * bits in ifr_media can be sanitized before ifmedia_ioctl() sees them;
 * everything else goes through ether_ioctl().
 */
static int
bge_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct bge_softc * const sc = ifp->if_softc;
	struct ifreq * const ifr = (struct ifreq *) data;
	int error = 0;

	/* Only multicast add/delete may arrive without the ifnet lock. */
	switch (command) {
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		KASSERT(IFNET_LOCKED(ifp));
	}

	const int s = splnet();

	switch (command) {
	case SIOCSIFMEDIA:
		mutex_enter(sc->sc_core_lock);
		/* XXX Flow control is not supported for 1000BASE-SX */
		if (sc->bge_flags & BGEF_FIBER_TBI) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		mutex_exit(sc->sc_core_lock);

		/* TBI fiber uses our own ifmedia; copper uses the MII's. */
		if (sc->bge_flags & BGEF_FIBER_TBI) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			struct mii_data * const mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			break;

		/* ENETRESET means the multicast list changed. */
		error = 0;

		if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			mutex_enter(sc->sc_core_lock);
			if (sc->bge_if_flags & IFF_RUNNING) {
				bge_setmulti(sc);
			}
			mutex_exit(sc->sc_core_lock);
		}
		break;
	}

	splx(s);

	return error;
}
6001:
/*
 * Decide whether the transmitter is still healthy.  Returns true when
 * no reset is required (nothing is in flight, the timeout has not yet
 * expired, or the stall is explained by 802.3x flow control); returns
 * false when the controller looks wedged and should be reset.
 */
static bool
bge_watchdog_check(struct bge_softc * const sc)
{

	KASSERT(mutex_owned(sc->sc_core_lock));

	/* Nothing queued for transmit: nothing can time out. */
	if (!sc->bge_tx_sending)
		return true;

	if (time_uptime - sc->bge_tx_lastsent <= bge_watchdog_timeout)
		return true;

	/* If pause frames are active then don't reset the hardware. */
	if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
		const uint32_t status = CSR_READ_4(sc, BGE_RX_STS);
		if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
			/*
			 * If link partner has us in XOFF state then wait for
			 * the condition to clear.
			 */
			CSR_WRITE_4(sc, BGE_RX_STS, status);
			sc->bge_tx_lastsent = time_uptime;
			return true;
		} else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
		    (status & BGE_RXSTAT_RCVD_XON) != 0) {
			/*
			 * Both an XOFF and an XON have been received:
			 * flow control was recently active, so extend
			 * the deadline rather than resetting.
			 */
			CSR_WRITE_4(sc, BGE_RX_STS, status);
			sc->bge_tx_lastsent = time_uptime;
			return true;
		}
		/*
		 * Any other condition is unexpected and the controller
		 * should be reset.
		 */
	}

	return false;
}
6043:
6044: static bool
6045: bge_watchdog(struct ifnet *ifp)
6046: {
6047: struct bge_softc * const sc = ifp->if_softc;
6048:
6049: KASSERT(mutex_owned(sc->sc_core_lock));
6050:
6051: if (!sc->sc_triggerreset && bge_watchdog_check(sc))
6052: return true;
6053:
1.138 joerg 6054: aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
1.1 fvdl 6055:
1.375 skrll 6056: if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
6057: workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
6058:
6059: return false;
6060: }
6061:
6062: /*
6063: * Perform an interface watchdog reset.
6064: */
/*
 * Workqueue handler queued by bge_watchdog(): re-initialize the
 * interface under the ifnet lock, then clear the pending flag so a
 * later watchdog can queue another reset.
 */
static void
bge_handle_reset_work(struct work *work, void *arg)
{
	struct bge_softc * const sc = arg;
	struct ifnet * const ifp = &sc->ethercom.ec_if;

	/* Don't want ioctl operations to happen */
	IFNET_LOCK(ifp);

	/* reset the interface. */
	bge_init(ifp);

	IFNET_UNLOCK(ifp);

	/*
	 * There are still some upper layer processing which call
	 * ifp->if_start(). e.g. ALTQ or one CPU system
	 */
	/* Try to get more packets going. */
	ifp->if_start(ifp);

	/* Allow the next watchdog-triggered reset to be queued. */
	atomic_store_relaxed(&sc->sc_reset_pending, 0);
}
6088:
1.11 thorpej 6089: static void
6090: bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
6091: {
6092: int i;
6093:
1.211 msaitoh 6094: BGE_CLRBIT_FLUSH(sc, reg, bit);
1.11 thorpej 6095:
1.180 msaitoh 6096: for (i = 0; i < 1000; i++) {
1.216 msaitoh 6097: delay(100);
1.11 thorpej 6098: if ((CSR_READ_4(sc, reg) & bit) == 0)
6099: return;
6100: }
6101:
1.165 msaitoh 6102: /*
6103: * Doesn't print only when the register is BGE_SRS_MODE. It occurs
6104: * on some environment (and once after boot?)
6105: */
6106: if (reg != BGE_SRS_MODE)
6107: aprint_error_dev(sc->bge_dev,
6108: "block failed to stop: reg 0x%lx, bit 0x%08x\n",
6109: (u_long)reg, bit);
1.11 thorpej 6110: }
6111:
1.375 skrll 6112:
/*
 * ifnet stop entry point: take the core lock around the real stop.
 */
static void
bge_stop(struct ifnet *ifp, int disable)
{
	struct bge_softc * const sc = ifp->if_softc;

	/* bge_stop_locked() may sleep (it calls callout_halt()). */
	ASSERT_SLEEPABLE();

	mutex_enter(sc->sc_core_lock);
	bge_stop_locked(ifp, disable);
	mutex_exit(sc->sc_core_lock);
}
6124:
1.1 fvdl 6125: /*
6126: * Stop the adapter and free any mbufs allocated to the
6127: * RX and TX lists.
6128: */
static void
bge_stop_locked(struct ifnet *ifp, int disable)
{
	struct bge_softc * const sc = ifp->if_softc;

	KASSERT(mutex_owned(sc->sc_core_lock));

	/*
	 * On detach (disable), wait for a running tick callout to
	 * finish; otherwise just stop it.
	 */
	if (disable) {
		sc->bge_detaching = 1;
		callout_halt(&sc->bge_timeout, NULL);
	} else
		callout_stop(&sc->bge_timeout);

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Disable all of the receiver blocks.
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Stop TX DMA in the MAC before stopping the TX mode block. */
	BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB);
	delay(40);

	bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	/* 5718 step 5a,5b */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* 5718 step 5c,5d */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	if (BGE_IS_5700_FAMILY(sc)) {
		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Keep the ASF firmware running if up.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	else
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc, disable);

	/*
	 * Isolate/power down the PHY.
	 */
	if (!(sc->bge_flags & BGEF_FIBER_TBI))
		mii_down(&sc->bge_mii);

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK);

	ifp->if_flags &= ~IFF_RUNNING;
}
6235:
/*
 * Update the driver's notion of link state.  Three detection schemes
 * are used depending on the hardware: MII interrupts (BCM5700), the
 * MAC's TBI sync status (fiber), or direct PHY polling (autopoll and
 * everything else).
 */
static void
bge_link_upd(struct bge_softc *sc)
{
	struct ifnet * const ifp = &sc->ethercom.ec_if;
	struct mii_data * const mii = &sc->bge_mii;
	uint32_t status;
	uint16_t phyval;
	int link;

	/* Clear 'pending link event' flag */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);

			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			/* Read the PHY ISR to ack, then re-arm its IMR. */
			bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_ISR, &phyval);
			bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_IMR, BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGEF_FIBER_TBI) {
		/* Fiber: link follows the MAC's PCS sync status. */
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
				if (BGE_ASICREV(sc->bge_chipid)
				    == BGE_ASICREV_BCM5704) {
					BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
					DELAY(40);
				}
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if_link_state_change(ifp, LINK_STATE_UP);
			}
		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
		/*
		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED
		 * bit in status word always set. Workaround this bug by
		 * reading PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)?
		    BGE_STS_LINK : 0;

		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
		}
	} else {
		/*
		 * For controllers that call mii_tick, we have to poll
		 * link status.
		 */
		mii_pollstat(mii);
	}

	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
		uint32_t reg, scale;

		/* Re-derive the timer prescaler from the MAC clock. */
		reg = CSR_READ_4(sc, BGE_CPMU_CLCK_STAT) &
		    BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		reg = CSR_READ_4(sc, BGE_MISC_CFG) &
		    ~BGE_MISCCFG_TIMER_PRESCALER;
		reg |= scale << 1;
		CSR_WRITE_4(sc, BGE_MISC_CFG, reg);
	}
	/* Clear the attention */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}
6354:
1.64 jonathan 6355: static int
1.207 msaitoh 6356: bge_sysctl_verify(SYSCTLFN_ARGS)
1.64 jonathan 6357: {
6358: int error, t;
6359: struct sysctlnode node;
6360:
6361: node = *rnode;
6362: t = *(int*)rnode->sysctl_data;
6363: node.sysctl_data = &t;
6364: error = sysctl_lookup(SYSCTLFN_CALL(&node));
6365: if (error || newp == NULL)
1.170 msaitoh 6366: return error;
1.64 jonathan 6367:
6368: #if 0
6369: DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
6370: node.sysctl_num, rnode->sysctl_num));
6371: #endif
6372:
6373: if (node.sysctl_num == bge_rxthresh_nodenum) {
6374: if (t < 0 || t >= NBGE_RX_THRESH)
1.170 msaitoh 6375: return EINVAL;
1.64 jonathan 6376: bge_update_all_threshes(t);
6377: } else
1.170 msaitoh 6378: return EINVAL;
1.64 jonathan 6379:
6380: *(int*)rnode->sysctl_data = t;
6381:
1.170 msaitoh 6382: return 0;
1.64 jonathan 6383: }
6384:
6385: /*
1.65 atatat 6386: * Set up sysctl(3) MIB, hw.bge.*.
1.64 jonathan 6387: */
static void
bge_sysctl_init(struct bge_softc *sc)
{
	int rc, bge_root_num;
	const struct sysctlnode *node;

	/* Create the hw.bge root node. */
	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
	    0, CTLTYPE_NODE, "bge",
	    SYSCTL_DESCR("BGE interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto out;
	}

	bge_root_num = node->sysctl_num;

	/* BGE Rx interrupt mitigation level */
	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_lvl",
	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
	    bge_sysctl_verify, 0,
	    &bge_rx_thresh_lvl,
	    0, CTL_HW, bge_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto out;
	}

	/* Remember the node number so bge_sysctl_verify() can match it. */
	bge_rxthresh_nodenum = node->sysctl_num;

#ifdef BGE_DEBUG
	/* Debug knob: force a watchdog-style interface reset. */
	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "trigger_reset",
	    SYSCTL_DESCR("Trigger an interface reset"),
	    NULL, 0, &sc->sc_triggerreset, 0, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto out;
	}
#endif
	return;

out:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
1.151 cegger 6432:
1.172 msaitoh 6433: #ifdef BGE_DEBUG
/*
 * Print the detected hardware, PHY and ASF related flags of the
 * controller (debug aid, compiled only with BGE_DEBUG).
 */
void
bge_debug_info(struct bge_softc *sc)
{

	printf("Hardware Flags:\n");
	if (BGE_IS_57765_PLUS(sc))
		printf(" - 57765 Plus\n");
	if (BGE_IS_5717_PLUS(sc))
		printf(" - 5717 Plus\n");
	if (BGE_IS_5755_PLUS(sc))
		printf(" - 5755 Plus\n");
	if (BGE_IS_575X_PLUS(sc))
		printf(" - 575X Plus\n");
	if (BGE_IS_5705_PLUS(sc))
		printf(" - 5705 Plus\n");
	if (BGE_IS_5714_FAMILY(sc))
		printf(" - 5714 Family\n");
	if (BGE_IS_5700_FAMILY(sc))
		printf(" - 5700 Family\n");
	if (sc->bge_flags & BGEF_IS_5788)
		printf(" - 5788\n");
	if (sc->bge_flags & BGEF_JUMBO_CAPABLE)
		printf(" - Supports Jumbo Frames\n");
	if (sc->bge_flags & BGEF_NO_EEPROM)
		printf(" - No EEPROM\n");
	if (sc->bge_flags & BGEF_PCIX)
		printf(" - PCI-X Bus\n");
	if (sc->bge_flags & BGEF_PCIE)
		printf(" - PCI Express Bus\n");
	if (sc->bge_flags & BGEF_RX_ALIGNBUG)
		printf(" - RX Alignment Bug\n");
	if (sc->bge_flags & BGEF_APE)
		printf(" - APE\n");
	if (sc->bge_flags & BGEF_CPMU_PRESENT)
		printf(" - CPMU\n");
	if (sc->bge_flags & BGEF_TSO)
		printf(" - TSO\n");
	if (sc->bge_flags & BGEF_TAGGED_STATUS)
		printf(" - TAGGED_STATUS\n");

	/* PHY related */
	if (sc->bge_phy_flags & BGEPHYF_NO_3LED)
		printf(" - No 3 LEDs\n");
	if (sc->bge_phy_flags & BGEPHYF_CRC_BUG)
		printf(" - CRC bug\n");
	if (sc->bge_phy_flags & BGEPHYF_ADC_BUG)
		printf(" - ADC bug\n");
	if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG)
		printf(" - 5704 A0 bug\n");
	if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG)
		printf(" - jitter bug\n");
	if (sc->bge_phy_flags & BGEPHYF_BER_BUG)
		printf(" - BER bug\n");
	if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM)
		printf(" - adjust trim\n");
	if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED)
		printf(" - no wirespeed\n");

	/* ASF related */
	if (sc->bge_asf_mode & ASF_ENABLE)
		printf(" - ASF enable\n");
	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE)
		printf(" - ASF new handshake\n");
	if (sc->bge_asf_mode & ASF_STACKUP)
		printf(" - ASF stackup\n");
}
6500: #endif /* BGE_DEBUG */
6501:
6502: static int
6503: bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6504: {
6505: prop_dictionary_t dict;
6506: prop_data_t ea;
6507:
1.261 msaitoh 6508: if ((sc->bge_flags & BGEF_NO_EEPROM) == 0)
1.172 msaitoh 6509: return 1;
6510:
6511: dict = device_properties(sc->bge_dev);
6512: ea = prop_dictionary_get(dict, "mac-address");
6513: if (ea != NULL) {
6514: KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
6515: KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1.346 msaitoh 6516: memcpy(ether_addr, prop_data_value(ea), ETHER_ADDR_LEN);
1.172 msaitoh 6517: return 0;
6518: }
6519:
6520: return 1;
6521: }
6522:
1.178 msaitoh 6523: static int
1.170 msaitoh 6524: bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
1.151 cegger 6525: {
1.170 msaitoh 6526: uint32_t mac_addr;
1.151 cegger 6527:
1.205 msaitoh 6528: mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
1.151 cegger 6529: if ((mac_addr >> 16) == 0x484b) {
6530: ether_addr[0] = (uint8_t)(mac_addr >> 8);
6531: ether_addr[1] = (uint8_t)mac_addr;
1.205 msaitoh 6532: mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
1.151 cegger 6533: ether_addr[2] = (uint8_t)(mac_addr >> 24);
6534: ether_addr[3] = (uint8_t)(mac_addr >> 16);
6535: ether_addr[4] = (uint8_t)(mac_addr >> 8);
6536: ether_addr[5] = (uint8_t)mac_addr;
1.170 msaitoh 6537: return 0;
1.151 cegger 6538: }
1.170 msaitoh 6539: return 1;
1.151 cegger 6540: }
6541:
6542: static int
1.170 msaitoh 6543: bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
1.151 cegger 6544: {
6545: int mac_offset = BGE_EE_MAC_OFFSET;
6546:
1.177 msaitoh 6547: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1.151 cegger 6548: mac_offset = BGE_EE_MAC_OFFSET_5906;
6549:
6550: return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6551: ETHER_ADDR_LEN));
6552: }
6553:
6554: static int
1.170 msaitoh 6555: bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
1.151 cegger 6556: {
6557:
1.170 msaitoh 6558: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
6559: return 1;
1.151 cegger 6560:
6561: return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6562: ETHER_ADDR_LEN));
6563: }
6564:
6565: static int
1.170 msaitoh 6566: bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
1.151 cegger 6567: {
6568: static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6569: /* NOTE: Order is critical */
1.172 msaitoh 6570: bge_get_eaddr_fw,
1.151 cegger 6571: bge_get_eaddr_mem,
6572: bge_get_eaddr_nvram,
6573: bge_get_eaddr_eeprom,
6574: NULL
6575: };
6576: const bge_eaddr_fcn_t *func;
6577:
6578: for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6579: if ((*func)(sc, eaddr) == 0)
6580: break;
6581: }
1.362 skrll 6582: return *func == NULL ? ENXIO : 0;
1.151 cegger 6583: }
CVSweb <webmaster@jp.NetBSD.org>