
Annotation of src/sys/dev/pci/if_bge.c, Revision 1.29.2.5

1.29.2.5! grant       1: /*     $NetBSD: if_bge.c,v 1.29.2.4 2003/06/16 13:23:30 grant Exp $    */
1.29.2.2  jmc         2:
                      3: /*
                      4:  * Copyright (c) 2001 Wind River Systems
                      5:  * Copyright (c) 1997, 1998, 1999, 2001
                      6:  *     Bill Paul <wpaul@windriver.com>.  All rights reserved.
                      7:  *
                      8:  * Redistribution and use in source and binary forms, with or without
                      9:  * modification, are permitted provided that the following conditions
                     10:  * are met:
                     11:  * 1. Redistributions of source code must retain the above copyright
                     12:  *    notice, this list of conditions and the following disclaimer.
                     13:  * 2. Redistributions in binary form must reproduce the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer in the
                     15:  *    documentation and/or other materials provided with the distribution.
                     16:  * 3. All advertising materials mentioning features or use of this software
                     17:  *    must display the following acknowledgement:
                     18:  *     This product includes software developed by Bill Paul.
                     19:  * 4. Neither the name of the author nor the names of any co-contributors
                     20:  *    may be used to endorse or promote products derived from this software
                     21:  *    without specific prior written permission.
                     22:  *
                     23:  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
                     24:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     25:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     26:  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
                     27:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     28:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     29:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     30:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     31:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     32:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
                     33:  * THE POSSIBILITY OF SUCH DAMAGE.
                     34:  *
                     35:  * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
                     36:  */
                     37:
                     38: /*
                     39:  * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
                     40:  *
                     41:  * NetBSD version by:
                     42:  *
                     43:  *     Frank van der Linden <fvdl@wasabisystems.com>
                     44:  *     Jason Thorpe <thorpej@wasabisystems.com>
                      45:  *     Jonathan Stone <jonathan@dsg.stanford.edu>
                     46:  *
                     47:  * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
                     48:  * Senior Engineer, Wind River Systems
                     49:  */
                     50:
                     51: /*
                     52:  * The Broadcom BCM5700 is based on technology originally developed by
                     53:  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
                      54:  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
                     55:  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
                     56:  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
                     57:  * frames, highly configurable RX filtering, and 16 RX and TX queues
                     58:  * (which, along with RX filter rules, can be used for QOS applications).
                     59:  * Other features, such as TCP segmentation, may be available as part
                     60:  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
                     61:  * firmware images can be stored in hardware and need not be compiled
                     62:  * into the driver.
                     63:  *
                     64:  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
                      65:  * function on a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
                     66:  *
                     67:  * The BCM5701 is a single-chip solution incorporating both the BCM5700
                     68:  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
                     69:  * does not support external SSRAM.
                     70:  *
                     71:  * Broadcom also produces a variation of the BCM5700 under the "Altima"
                     72:  * brand name, which is functionally similar but lacks PCI-X support.
                     73:  *
                     74:  * Without external SSRAM, you can only have at most 4 TX rings,
                     75:  * and the use of the mini RX ring is disabled. This seems to imply
                     76:  * that these features are simply not available on the BCM5701. As a
                     77:  * result, this driver does not implement any support for the mini RX
                     78:  * ring.
                     79:  */
                     80:
                     81: #include "bpfilter.h"
                     82: #include "vlan.h"
                     83:
                     84: #include <sys/param.h>
                     85: #include <sys/systm.h>
                     86: #include <sys/callout.h>
                     87: #include <sys/sockio.h>
                     88: #include <sys/mbuf.h>
                     89: #include <sys/malloc.h>
                     90: #include <sys/kernel.h>
                     91: #include <sys/device.h>
                     92: #include <sys/socket.h>
                     93:
                     94: #include <net/if.h>
                     95: #include <net/if_dl.h>
                     96: #include <net/if_media.h>
                     97: #include <net/if_ether.h>
                     98:
                     99: #ifdef INET
                    100: #include <netinet/in.h>
                    101: #include <netinet/in_systm.h>
                    102: #include <netinet/in_var.h>
                    103: #include <netinet/ip.h>
                    104: #endif
                    105:
                    106: #if NBPFILTER > 0
                    107: #include <net/bpf.h>
                    108: #endif
                    109:
                    110: #include <dev/pci/pcireg.h>
                    111: #include <dev/pci/pcivar.h>
                    112: #include <dev/pci/pcidevs.h>
                    113:
                    114: #include <dev/mii/mii.h>
                    115: #include <dev/mii/miivar.h>
                    116: #include <dev/mii/miidevs.h>
                    117: #include <dev/mii/brgphyreg.h>
                    118:
                    119: #include <dev/pci/if_bgereg.h>
                    120:
                    121: #include <uvm/uvm_extern.h>
                    122:
                    123: int bge_probe(struct device *, struct cfdata *, void *);
                    124: void bge_attach(struct device *, struct device *, void *);
                    125: void bge_release_resources(struct bge_softc *);
                    126: void bge_txeof(struct bge_softc *);
                    127: void bge_rxeof(struct bge_softc *);
                    128:
                    129: void bge_tick(void *);
                    130: void bge_stats_update(struct bge_softc *);
                    131: int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
                    132:
                    133: int bge_intr(void *);
                    134: void bge_start(struct ifnet *);
                    135: int bge_ioctl(struct ifnet *, u_long, caddr_t);
                    136: int bge_init(struct ifnet *);
                    137: void bge_stop(struct bge_softc *);
                    138: void bge_watchdog(struct ifnet *);
                    139: void bge_shutdown(void *);
                    140: int bge_ifmedia_upd(struct ifnet *);
                    141: void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
                    142:
                    143: u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
                    144: int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
                    145:
                    146: void bge_setmulti(struct bge_softc *);
                    147:
                    148: void bge_handle_events(struct bge_softc *);
                    149: int bge_alloc_jumbo_mem(struct bge_softc *);
                    150: void bge_free_jumbo_mem(struct bge_softc *);
                    151: void *bge_jalloc(struct bge_softc *);
                    152: void bge_jfree(struct mbuf *, caddr_t, u_int, void *);
                    153: int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
                    154: int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
                    155: int bge_init_rx_ring_std(struct bge_softc *);
                    156: void bge_free_rx_ring_std(struct bge_softc *);
                    157: int bge_init_rx_ring_jumbo(struct bge_softc *);
                    158: void bge_free_rx_ring_jumbo(struct bge_softc *);
                    159: void bge_free_tx_ring(struct bge_softc *);
                    160: int bge_init_tx_ring(struct bge_softc *);
                    161:
                    162: int bge_chipinit(struct bge_softc *);
                    163: int bge_blockinit(struct bge_softc *);
                    164: int bge_setpowerstate(struct bge_softc *, int);
                    165:
                    166: #ifdef notdef
                    167: u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
                    168: void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
                    169: void bge_vpd_read(struct bge_softc *);
                    170: #endif
                    171:
                    172: u_int32_t bge_readmem_ind(struct bge_softc *, int);
                    173: void bge_writemem_ind(struct bge_softc *, int, int);
                    174: #ifdef notdef
                    175: u_int32_t bge_readreg_ind(struct bge_softc *, int);
                    176: #endif
                    177: void bge_writereg_ind(struct bge_softc *, int, int);
                    178:
                    179: int bge_miibus_readreg(struct device *, int, int);
                    180: void bge_miibus_writereg(struct device *, int, int, int);
                    181: void bge_miibus_statchg(struct device *);
                    182:
                    183: void bge_reset(struct bge_softc *);
                    184:
                    185: void bge_dump_status(struct bge_softc *);
                    186: void bge_dump_rxbd(struct bge_rx_bd *);
                    187:
                    188: #define BGE_DEBUG
                    189: #ifdef BGE_DEBUG
                    190: #define DPRINTF(x)     if (bgedebug) printf x
                    191: #define DPRINTFN(n,x)  if (bgedebug >= (n)) printf x
                    192: int    bgedebug = 0;
                    193: #else
                    194: #define DPRINTF(x)
                    195: #define DPRINTFN(n,x)
                    196: #endif
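                          /* Usage: DPRINTFN(2, ("%s: message\n", sc->bge_dev.dv_xname)); */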
                    197:
                    198: /* Various chip quirks. */
                    199: #define        BGE_QUIRK_LINK_STATE_BROKEN     0x00000001
                    200: #define        BGE_QUIRK_CSUM_BROKEN           0x00000002
                    201: #define        BGE_QUIRK_ONLY_PHY_1            0x00000004
                    202: #define        BGE_QUIRK_5700_SMALLDMA         0x00000008
                    203: #define        BGE_QUIRK_5700_PCIX_REG_BUG     0x00000010
                     204: #define        BGE_QUIRK_PRODUCER_BUG          0x00000020
                    205:
                    206: /* following bugs are common to bcm5700 rev B, all flavours */
                    207: #define BGE_QUIRK_5700_COMMON \
                    208:        (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)
                    209:
                    210: struct cfattach bge_ca = {
                    211:        sizeof(struct bge_softc), bge_probe, bge_attach
                    212: };
                    213:
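                          /*
                           * Access NIC-internal memory indirectly through PCI configuration
                           * space: write the target offset to the memory-window base-address
                           * register, then read or write the memory-window data register.
                           */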
                    214: u_int32_t
                    215: bge_readmem_ind(sc, off)
                    216:        struct bge_softc *sc;
                    217:        int off;
                    218: {
                    219:        struct pci_attach_args  *pa = &(sc->bge_pa);
                    220:        pcireg_t val;
                    221:
                    222:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
                    223:        val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
                    224:        return val;
                    225: }
                    226:
                    227: void
                    228: bge_writemem_ind(sc, off, val)
                    229:        struct bge_softc *sc;
                    230:        int off, val;
                    231: {
                    232:        struct pci_attach_args  *pa = &(sc->bge_pa);
                    233:
                    234:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
                    235:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
                    236: }
                    237:
                    238: #ifdef notdef
                    239: u_int32_t
                    240: bge_readreg_ind(sc, off)
                    241:        struct bge_softc *sc;
                    242:        int off;
                    243: {
                    244:        struct pci_attach_args  *pa = &(sc->bge_pa);
                    245:
                    246:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
                    247:        return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
                    248: }
                    249: #endif
                    250:
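                          /*
                           * Write a NIC register indirectly via the register-window
                           * base-address/data registers in PCI configuration space.
                           */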
                    251: void
                    252: bge_writereg_ind(sc, off, val)
                    253:        struct bge_softc *sc;
                    254:        int off, val;
                    255: {
                    256:        struct pci_attach_args  *pa = &(sc->bge_pa);
                    257:
                    258:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
                    259:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
                    260: }
                    261:
                    262: #ifdef notdef
                    263: u_int8_t
                    264: bge_vpd_readbyte(sc, addr)
                    265:        struct bge_softc *sc;
                    266:        int addr;
                    267: {
                    268:        int i;
                    269:        u_int32_t val;
                    270:        struct pci_attach_args  *pa = &(sc->bge_pa);
                    271:
                    272:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
                    273:        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                    274:                DELAY(10);
                    275:                if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
                    276:                    BGE_VPD_FLAG)
                    277:                        break;
                    278:        }
                    279:
                     280:        if (i >= BGE_TIMEOUT * 10) {
                    281:                printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
                    282:                return(0);
                    283:        }
                    284:
                    285:        val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);
                    286:
                    287:        return((val >> ((addr % 4) * 8)) & 0xFF);
                    288: }
                    289:
                    290: void
                    291: bge_vpd_read_res(sc, res, addr)
                    292:        struct bge_softc *sc;
                    293:        struct vpd_res *res;
                    294:        int addr;
                    295: {
                    296:        int i;
                    297:        u_int8_t *ptr;
                    298:
                    299:        ptr = (u_int8_t *)res;
                    300:        for (i = 0; i < sizeof(struct vpd_res); i++)
                    301:                ptr[i] = bge_vpd_readbyte(sc, i + addr);
                    302: }
                    303:
                    304: void
                    305: bge_vpd_read(sc)
                    306:        struct bge_softc *sc;
                    307: {
                    308:        int pos = 0, i;
                    309:        struct vpd_res res;
                    310:
                    311:        if (sc->bge_vpd_prodname != NULL)
                    312:                free(sc->bge_vpd_prodname, M_DEVBUF);
                    313:        if (sc->bge_vpd_readonly != NULL)
                    314:                free(sc->bge_vpd_readonly, M_DEVBUF);
                    315:        sc->bge_vpd_prodname = NULL;
                    316:        sc->bge_vpd_readonly = NULL;
                    317:
                    318:        bge_vpd_read_res(sc, &res, pos);
                    319:
                    320:        if (res.vr_id != VPD_RES_ID) {
                    321:                printf("%s: bad VPD resource id: expected %x got %x\n",
                    322:                        sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
                    323:                return;
                    324:        }
                    325:
                    326:        pos += sizeof(res);
                    327:        sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
                    328:        if (sc->bge_vpd_prodname == NULL)
                    329:                panic("bge_vpd_read");
                    330:        for (i = 0; i < res.vr_len; i++)
                    331:                sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
                    332:        sc->bge_vpd_prodname[i] = '\0';
                    333:        pos += i;
                    334:
                    335:        bge_vpd_read_res(sc, &res, pos);
                    336:
                    337:        if (res.vr_id != VPD_RES_READ) {
                    338:                printf("%s: bad VPD resource id: expected %x got %x\n",
                    339:                    sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
                    340:                return;
                    341:        }
                    342:
                    343:        pos += sizeof(res);
                    344:        sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
                    345:        if (sc->bge_vpd_readonly == NULL)
                    346:                panic("bge_vpd_read");
                    347:        for (i = 0; i < res.vr_len + 1; i++)
                    348:                sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
                    349: }
                    350: #endif
                    351:
                    352: /*
                    353:  * Read a byte of data stored in the EEPROM at address 'addr.' The
                    354:  * BCM570x supports both the traditional bitbang interface and an
                    355:  * auto access interface for reading the EEPROM. We use the auto
                    356:  * access method.
                    357:  */
                    358: u_int8_t
                    359: bge_eeprom_getbyte(sc, addr, dest)
                    360:        struct bge_softc *sc;
                    361:        int addr;
                    362:        u_int8_t *dest;
                    363: {
                    364:        int i;
                    365:        u_int32_t byte = 0;
                    366:
                    367:        /*
                    368:         * Enable use of auto EEPROM access so we can avoid
                    369:         * having to use the bitbang method.
                    370:         */
                    371:        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
                    372:
                    373:        /* Reset the EEPROM, load the clock period. */
                    374:        CSR_WRITE_4(sc, BGE_EE_ADDR,
                    375:            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
                    376:        DELAY(20);
                    377:
                    378:        /* Issue the read EEPROM command. */
                    379:        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
                    380:
                    381:        /* Wait for completion */
                    382:        for(i = 0; i < BGE_TIMEOUT * 10; i++) {
                    383:                DELAY(10);
                    384:                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                    385:                        break;
                    386:        }
                    387:
                     388:        if (i >= BGE_TIMEOUT * 10) {
                    389:                printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
                    390:                return(0);
                    391:        }
                    392:
                    393:        /* Get result. */
                    394:        byte = CSR_READ_4(sc, BGE_EE_DATA);
                    395:
                    396:        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
                    397:
                    398:        return(0);
                    399: }
                    400:
                    401: /*
                    402:  * Read a sequence of bytes from the EEPROM.
                    403:  */
                    404: int
                    405: bge_read_eeprom(sc, dest, off, cnt)
                    406:        struct bge_softc *sc;
                    407:        caddr_t dest;
                    408:        int off;
                    409:        int cnt;
                    410: {
                    411:        int err = 0, i;
                    412:        u_int8_t byte = 0;
                    413:
                    414:        for (i = 0; i < cnt; i++) {
                    415:                err = bge_eeprom_getbyte(sc, off + i, &byte);
                    416:                if (err)
                    417:                        break;
                    418:                *(dest + i) = byte;
                    419:        }
                    420:
                    421:        return(err ? 1 : 0);
                    422: }
                    423:
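                          /*
                           * Read a PHY register through the MI communication register.
                           * Autopolling is temporarily disabled around the access, since
                           * touching the PHY while the chip is autopolling can trigger
                           * PCI errors.
                           */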
                    424: int
                    425: bge_miibus_readreg(dev, phy, reg)
                    426:        struct device *dev;
                    427:        int phy, reg;
                    428: {
                    429:        struct bge_softc *sc = (struct bge_softc *)dev;
                    430:        struct ifnet *ifp;
                    431:        u_int32_t val;
                    432:        u_int32_t saved_autopoll;
                    433:        int i;
                    434:
                    435:        ifp = &sc->ethercom.ec_if;
                    436:
                    437:        /*
                    438:         * Several chips with builtin PHYs will incorrectly answer to
                    439:         * other PHY instances than the builtin PHY at id 1.
                    440:         */
                    441:        if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
                    442:                return(0);
                    443:
                    444:        /* Reading with autopolling on may trigger PCI errors */
                    445:        saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
                    446:        if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
                    447:                CSR_WRITE_4(sc, BGE_MI_MODE,
                    448:             saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
                    449:                DELAY(40);
                    450:        }
                    451:
                    452:        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
                    453:            BGE_MIPHY(phy)|BGE_MIREG(reg));
                    454:
                    455:        for (i = 0; i < BGE_TIMEOUT; i++) {
                    456:                val = CSR_READ_4(sc, BGE_MI_COMM);
                    457:                if (!(val & BGE_MICOMM_BUSY))
                    458:                        break;
                    459:                delay(10);
                    460:        }
                    461:
                    462:        if (i == BGE_TIMEOUT) {
                    463:                printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
                    464:         val = 0;
                    465:                goto done;
                    466:        }
                    467:
                    468:        val = CSR_READ_4(sc, BGE_MI_COMM);
                    469:
                    470: done:
                    471:        if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
                    472:                CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
                    473:                DELAY(40);
                    474:        }
                    475:
                    476:        if (val & BGE_MICOMM_READFAIL)
                    477:                return(0);
                    478:
                    479:        return(val & 0xFFFF);
                    480: }
                    481:
                    482: void
                    483: bge_miibus_writereg(dev, phy, reg, val)
                    484:        struct device *dev;
                    485:        int phy, reg, val;
                    486: {
                    487:        struct bge_softc *sc = (struct bge_softc *)dev;
                    488:     u_int32_t saved_autopoll;
                    489:     int i;
                    490:
                    491:     /* Touching the PHY while autopolling is on may trigger PCI errors */
                    492:        saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
                    493:        if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
                    494:                delay(40);
                    495:                CSR_WRITE_4(sc, BGE_MI_MODE,
                    496:                    saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
                    497:                delay(10); /* 40 usec is supposed to be adequate */
                    498:        }
                    499:
                    500:        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
                    501:            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
                    502:
                    503:        for (i = 0; i < BGE_TIMEOUT; i++) {
                    504:                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
                    505:                        break;
                    506:                delay(10);
                    507:        }
                    508:
                    509:        if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
                    510:                CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
                    511:                delay(40);
                    512:        }
                    513:
                    514:        if (i == BGE_TIMEOUT) {
                     515:                printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
                    516:        }
                    517: }
                    518:
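                          /*
                           * Callback from the MII layer when the negotiated media changes:
                           * program the MAC port mode (GMII vs. MII) and the duplex setting
                           * to match what the PHY reports.
                           */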
                    519: void
                    520: bge_miibus_statchg(dev)
                    521:        struct device *dev;
                    522: {
                    523:        struct bge_softc *sc = (struct bge_softc *)dev;
                    524:        struct mii_data *mii = &sc->bge_mii;
                    525:
                    526:        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
                    527:        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
                    528:                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
                    529:        } else {
                    530:                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
                    531:        }
                    532:
                    533:        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                    534:                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
                    535:        } else {
                    536:                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
                    537:        }
                    538: }
                    539:
                    540: /*
                    541:  * Handle events that have triggered interrupts.
                    542:  */
                    543: void
                    544: bge_handle_events(sc)
                    545:        struct bge_softc                *sc;
                    546: {
                    547:
                    548:        return;
                    549: }
                    550:
                    551: /*
                    552:  * Memory management for jumbo frames.
                    553:  */
                    554:
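                          /*
                           * Allocate one contiguous DMA-safe region of BGE_JMEM bytes, map it,
                           * and carve it into BGE_JSLOTS buffers of BGE_JLEN bytes each.  The
                           * slots are handed out and reclaimed through the bge_jfree_listhead
                           * and bge_jinuse_listhead free/in-use lists.
                           */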
                    555: int
                    556: bge_alloc_jumbo_mem(sc)
                    557:        struct bge_softc                *sc;
                    558: {
                    559:        caddr_t                 ptr, kva;
                    560:        bus_dma_segment_t       seg;
                    561:        int             i, rseg, state, error;
                    562:        struct bge_jpool_entry   *entry;
                    563:
                    564:        state = error = 0;
                    565:
                    566:        /* Grab a big chunk o' storage. */
                    567:        if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
                    568:             &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
                    569:                printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
                    570:                return ENOBUFS;
                    571:        }
                    572:
                    573:        state = 1;
                    574:        if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
                    575:            BUS_DMA_NOWAIT)) {
                    576:                printf("%s: can't map dma buffers (%d bytes)\n",
                    577:                    sc->bge_dev.dv_xname, (int)BGE_JMEM);
                    578:                error = ENOBUFS;
                    579:                goto out;
                    580:        }
                    581:
                    582:        state = 2;
                    583:        if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
                    584:            BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
                    585:                printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
                    586:                error = ENOBUFS;
                    587:                goto out;
                    588:        }
                    589:
                    590:        state = 3;
                    591:        if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
                    592:            kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
                    593:                printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
                    594:                error = ENOBUFS;
                    595:                goto out;
                    596:        }
                    597:
                    598:        state = 4;
                    599:        sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
                    600:        DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf));
                    601:
                    602:        SLIST_INIT(&sc->bge_jfree_listhead);
                    603:        SLIST_INIT(&sc->bge_jinuse_listhead);
                    604:
                    605:        /*
                    606:         * Now divide it up into 9K pieces and save the addresses
                    607:         * in an array.
                    608:         */
                    609:        ptr = sc->bge_cdata.bge_jumbo_buf;
                    610:        for (i = 0; i < BGE_JSLOTS; i++) {
                    611:                sc->bge_cdata.bge_jslots[i] = ptr;
                    612:                ptr += BGE_JLEN;
                    613:                entry = malloc(sizeof(struct bge_jpool_entry),
                    614:                    M_DEVBUF, M_NOWAIT);
                    615:                if (entry == NULL) {
                    616:                        printf("%s: no memory for jumbo buffer queue!\n",
                    617:                            sc->bge_dev.dv_xname);
                    618:                        error = ENOBUFS;
                    619:                        goto out;
                    620:                }
                    621:                entry->slot = i;
                    622:                SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
                    623:                                 entry, jpool_entries);
                    624:        }
                    625: out:
                    626:        if (error != 0) {
                    627:                switch (state) {
                    628:                case 4:
                    629:                        bus_dmamap_unload(sc->bge_dmatag,
                    630:                            sc->bge_cdata.bge_rx_jumbo_map);
                    631:                case 3:
                    632:                        bus_dmamap_destroy(sc->bge_dmatag,
                    633:                            sc->bge_cdata.bge_rx_jumbo_map);
                    634:                case 2:
                    635:                        bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
                    636:                case 1:
                    637:                        bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
                    638:                        break;
                    639:                default:
                    640:                        break;
                    641:                }
                    642:        }
                    643:
                    644:        return error;
                    645: }
                    646:
                    647: /*
                    648:  * Allocate a jumbo buffer.
                    649:  */
                    650: void *
                    651: bge_jalloc(sc)
                    652:        struct bge_softc                *sc;
                    653: {
                    654:        struct bge_jpool_entry   *entry;
                    655:
                    656:        entry = SLIST_FIRST(&sc->bge_jfree_listhead);
                    657:
                    658:        if (entry == NULL) {
                    659:                printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
                    660:                return(NULL);
                    661:        }
                    662:
                    663:        SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
                    664:        SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
                    665:        return(sc->bge_cdata.bge_jslots[entry->slot]);
                    666: }
                    667:
                    668: /*
                    669:  * Release a jumbo buffer.
                    670:  */
                    671: void
                    672: bge_jfree(m, buf, size, arg)
                    673:        struct mbuf     *m;
                    674:        caddr_t         buf;
                    675:        u_int           size;
                    676:        void            *arg;
                    677: {
                    678:        struct bge_jpool_entry *entry;
                    679:        struct bge_softc *sc;
                    680:        int i, s;
                    681:
                    682:        /* Extract the softc struct pointer. */
                    683:        sc = (struct bge_softc *)arg;
                    684:
                    685:        if (sc == NULL)
                    686:                panic("bge_jfree: can't find softc pointer!");
                    687:
                    688:        /* calculate the slot this buffer belongs to */
                    689:
                    690:        i = ((caddr_t)buf
                    691:             - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
                    692:
                    693:        if ((i < 0) || (i >= BGE_JSLOTS))
                    694:                panic("bge_jfree: asked to free buffer that we don't manage!");
                    695:
                    696:        s = splvm();
                    697:        entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
                    698:        if (entry == NULL)
                    699:                panic("bge_jfree: buffer not in use!");
                    700:        entry->slot = i;
                    701:        SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
                    702:        SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
                    703:
                    704:        if (__predict_true(m != NULL))
                    705:                pool_cache_put(&mbpool_cache, m);
                    706:        splx(s);
                    707: }
                    708:
                    709:
                    710: /*
                     711:  * Initialize a standard receive ring descriptor.
                    712:  */
                    713: int
                    714: bge_newbuf_std(sc, i, m, dmamap)
                    715:        struct bge_softc        *sc;
                    716:        int                     i;
                    717:        struct mbuf             *m;
                    718:        bus_dmamap_t dmamap;
                    719: {
                    720:        struct mbuf             *m_new = NULL;
                    721:        struct bge_rx_bd        *r;
                    722:        int                     error;
                    723:
                    724:        if (dmamap == NULL) {
                    725:                error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
                    726:                    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
                    727:                if (error != 0)
                    728:                        return error;
                    729:        }
                    730:
                    731:        sc->bge_cdata.bge_rx_std_map[i] = dmamap;
                    732:
                    733:        if (m == NULL) {
                    734:                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                    735:                if (m_new == NULL) {
                    736:                        return(ENOBUFS);
                    737:                }
                    738:
                    739:                MCLGET(m_new, M_DONTWAIT);
                    740:                if (!(m_new->m_flags & M_EXT)) {
                    741:                        m_freem(m_new);
                    742:                        return(ENOBUFS);
                    743:                }
                    744:                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
                    745:                m_adj(m_new, ETHER_ALIGN);
                    746:
                    747:                if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
                    748:                    BUS_DMA_READ|BUS_DMA_NOWAIT))
                    749:                        return(ENOBUFS);
                    750:        } else {
                    751:                m_new = m;
                    752:                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
                    753:                m_new->m_data = m_new->m_ext.ext_buf;
                    754:                m_adj(m_new, ETHER_ALIGN);
                    755:        }
                    756:
                    757:        sc->bge_cdata.bge_rx_std_chain[i] = m_new;
                    758:        r = &sc->bge_rdata->bge_rx_std_ring[i];
                    759:        bge_set_hostaddr(&r->bge_addr,
                    760:            dmamap->dm_segs[0].ds_addr);
                    761:        r->bge_flags = BGE_RXBDFLAG_END;
                    762:        r->bge_len = m_new->m_len;
                    763:        r->bge_idx = i;
                    764:
                    765:        bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
                    766:            offsetof(struct bge_ring_data, bge_rx_std_ring) +
                    767:                i * sizeof (struct bge_rx_bd),
                    768:            sizeof (struct bge_rx_bd),
                    769:            BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
                    770:
                    771:        return(0);
                    772: }
                    773:
                    774: /*
                    775:  * Initialize a jumbo receive ring descriptor. This allocates
                    776:  * a jumbo buffer from the pool managed internally by the driver.
                    777:  */
                    778: int
                    779: bge_newbuf_jumbo(sc, i, m)
                    780:        struct bge_softc *sc;
                    781:        int i;
                    782:        struct mbuf *m;
                    783: {
                    784:        struct mbuf *m_new = NULL;
                    785:        struct bge_rx_bd *r;
                    786:
                    787:        if (m == NULL) {
                    788:                caddr_t                 *buf = NULL;
                    789:
                    790:                /* Allocate the mbuf. */
                    791:                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                    792:                if (m_new == NULL) {
                    793:                        return(ENOBUFS);
                    794:                }
                    795:
                    796:                /* Allocate the jumbo buffer */
                    797:                buf = bge_jalloc(sc);
                    798:                if (buf == NULL) {
                    799:                        m_freem(m_new);
                    800:                        printf("%s: jumbo allocation failed "
                    801:                            "-- packet dropped!\n", sc->bge_dev.dv_xname);
                    802:                        return(ENOBUFS);
                    803:                }
                    804:
                    805:                /* Attach the buffer to the mbuf. */
                    806:                m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
                    807:                MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
                    808:                    bge_jfree, sc);
                    809:        } else {
                    810:                m_new = m;
                    811:                m_new->m_data = m_new->m_ext.ext_buf;
                    812:                m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
                    813:        }
                    814:
                    815:        m_adj(m_new, ETHER_ALIGN);
                    816:        /* Set up the descriptor. */
                    817:        r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
                    818:        sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
                    819:        bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
                    820:        r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
                    821:        r->bge_len = m_new->m_len;
                    822:        r->bge_idx = i;
                    823:
                    824:        bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
                    825:            offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
                    826:                i * sizeof (struct bge_rx_bd),
                    827:            sizeof (struct bge_rx_bd),
                    828:            BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
                    829:
                    830:        return(0);
                    831: }
                    832:
                    833: /*
                    834:  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
                     835:  * that's 1MB of memory, which is a lot. For now, we fill only the first
                    836:  * 256 ring entries and hope that our CPU is fast enough to keep up with
                    837:  * the NIC.
                    838:  */
                    839: int
                    840: bge_init_rx_ring_std(sc)
                    841:        struct bge_softc *sc;
                    842: {
                    843:        int i;
                    844:
                    845:        if (sc->bge_flags & BGE_RXRING_VALID)
                    846:                return 0;
                    847:
                    848:        for (i = 0; i < BGE_SSLOTS; i++) {
                    849:                if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
                    850:                        return(ENOBUFS);
                    851:        }
                    852:
                    853:        sc->bge_std = i - 1;
                    854:        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
                    855:
                    856:        sc->bge_flags |= BGE_RXRING_VALID;
                    857:
                    858:        return(0);
                    859: }
                    860:
                    861: void
                    862: bge_free_rx_ring_std(sc)
                    863:        struct bge_softc *sc;
                    864: {
                    865:        int i;
                    866:
                    867:        if (!(sc->bge_flags & BGE_RXRING_VALID))
                    868:                return;
                    869:
                    870:        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                    871:                if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
                    872:                        m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
                    873:                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                    874:                        bus_dmamap_destroy(sc->bge_dmatag,
                    875:                            sc->bge_cdata.bge_rx_std_map[i]);
                    876:                }
                    877:                memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
                    878:                    sizeof(struct bge_rx_bd));
                    879:        }
                    880:
                    881:        sc->bge_flags &= ~BGE_RXRING_VALID;
                    882: }
                    883:
                    884: int
                    885: bge_init_rx_ring_jumbo(sc)
                    886:        struct bge_softc *sc;
                    887: {
                    888:        int i;
1.29.2.4  grant     889:        volatile struct bge_rcb *rcb;
1.29.2.2  jmc       890:
                    891:        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                    892:                if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
                    893:                        return(ENOBUFS);
                     894:        }
                    895:
                    896:        sc->bge_jumbo = i - 1;
                    897:
                    898:        rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1.29.2.4  grant     899:        rcb->bge_maxlen_flags = 0;
                    900:        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1.29.2.2  jmc       901:
                    902:        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
                    903:
                    904:        return(0);
                    905: }
                    906:
                    907: void
                    908: bge_free_rx_ring_jumbo(sc)
                    909:        struct bge_softc *sc;
                    910: {
                    911:        int i;
                    912:
                    913:        if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
                    914:                return;
                    915:
                    916:        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                    917:                if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
                    918:                        m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
                    919:                        sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
                    920:                }
                    921:                memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
                    922:                    sizeof(struct bge_rx_bd));
                    923:        }
                    924:
                    925:        sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
                    926: }
                    927:
                    928: void
                    929: bge_free_tx_ring(sc)
                    930:        struct bge_softc *sc;
                    931: {
                    932:        int i, freed;
                    933:        struct txdmamap_pool_entry *dma;
                    934:
                    935:        if (!(sc->bge_flags & BGE_TXRING_VALID))
                    936:                return;
                    937:
                    938:        freed = 0;
                    939:
                    940:        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                    941:                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                    942:                        freed++;
                    943:                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                    944:                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                    945:                        SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
                    946:                                            link);
                    947:                        sc->txdma[i] = 0;
                    948:                }
                    949:                memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
                    950:                    sizeof(struct bge_tx_bd));
                    951:        }
                    952:
                    953:        while ((dma = SLIST_FIRST(&sc->txdma_list))) {
                    954:                SLIST_REMOVE_HEAD(&sc->txdma_list, link);
                    955:                bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
                    956:                free(dma, M_DEVBUF);
                    957:        }
                    958:
                    959:        sc->bge_flags &= ~BGE_TXRING_VALID;
                    960: }
                    961:
                    962: int
                    963: bge_init_tx_ring(sc)
                    964:        struct bge_softc *sc;
                    965: {
                    966:        int i;
                    967:        bus_dmamap_t dmamap;
                    968:        struct txdmamap_pool_entry *dma;
                    969:
                    970:        if (sc->bge_flags & BGE_TXRING_VALID)
                    971:                return 0;
                    972:
                    973:        sc->bge_txcnt = 0;
                    974:        sc->bge_tx_saved_considx = 0;
                    975:        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
                    976:        if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)    /* 5700 b2 errata */
                    977:                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
                    978:
                    979:        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
                    980:        if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)    /* 5700 b2 errata */
                     981:                CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
                    982:
                    983:        SLIST_INIT(&sc->txdma_list);
                    984:        for (i = 0; i < BGE_RSLOTS; i++) {
                    985:                if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO,
                    986:                    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
                    987:                    &dmamap))
                    988:                        return(ENOBUFS);
                    989:                if (dmamap == NULL)
                    990:                        panic("dmamap NULL in bge_init_tx_ring");
                    991:                dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
                    992:                if (dma == NULL) {
                    993:                        printf("%s: can't alloc txdmamap_pool_entry\n",
                    994:                            sc->bge_dev.dv_xname);
                    995:                        bus_dmamap_destroy(sc->bge_dmatag, dmamap);
                    996:                        return (ENOMEM);
                    997:                }
                    998:                dma->dmamap = dmamap;
                    999:                SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
                   1000:        }
                   1001:
                   1002:        sc->bge_flags |= BGE_TXRING_VALID;
                   1003:
                   1004:        return(0);
                   1005: }
                   1006:
                   1007: void
                   1008: bge_setmulti(sc)
                   1009:        struct bge_softc *sc;
                   1010: {
                   1011:        struct ethercom         *ac = &sc->ethercom;
                   1012:        struct ifnet            *ifp = &ac->ec_if;
                   1013:        struct ether_multi      *enm;
                   1014:        struct ether_multistep  step;
                   1015:        u_int32_t               hashes[4] = { 0, 0, 0, 0 };
                   1016:        u_int32_t               h;
                   1017:        int                     i;
                   1018:
                   1019:        if (ifp->if_flags & IFF_PROMISC)
                   1020:                goto allmulti;
                   1021:
                   1022:        /* Now program new ones. */
                   1023:        ETHER_FIRST_MULTI(step, ac, enm);
                   1024:        while (enm != NULL) {
                   1025:                if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
                   1026:                        /*
                   1027:                         * We must listen to a range of multicast addresses.
                   1028:                         * For now, just accept all multicasts, rather than
                   1029:                         * trying to set only those filter bits needed to match
                   1030:                         * the range.  (At this time, the only use of address
                   1031:                         * ranges is for IP multicast routing, for which the
                   1032:                         * range is big enough to require all bits set.)
                   1033:                         */
                   1034:                        goto allmulti;
                   1035:                }
                   1036:
                   1037:                h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
                   1038:
                   1039:                /* Just want the 7 least-significant bits. */
                   1040:                h &= 0x7f;
                   1041:
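                                         /*
                                          * Bits 6:5 of the hash select one of the four 32-bit
                                          * registers; bits 4:0 select the bit within it.
                                          */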
                   1042:                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
                   1043:                ETHER_NEXT_MULTI(step, enm);
                   1044:        }
                   1045:
                   1046:        ifp->if_flags &= ~IFF_ALLMULTI;
                   1047:        goto setit;
                   1048:
                   1049:  allmulti:
                   1050:        ifp->if_flags |= IFF_ALLMULTI;
                   1051:        hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
                   1052:
                   1053:  setit:
                   1054:        for (i = 0; i < 4; i++)
                   1055:                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
                   1056: }
                   1057:
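                         /*
                          * All combinations of the mode-control byte/word swap bits, indexed
                          * by bge_swapindex below; apparently kept as a debugging aid for
                          * experimenting with endianness settings.
                          */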
                   1058: const int bge_swapbits[] = {
                   1059:        0,
                   1060:        BGE_MODECTL_BYTESWAP_DATA,
                   1061:        BGE_MODECTL_WORDSWAP_DATA,
                   1062:        BGE_MODECTL_BYTESWAP_NONFRAME,
                   1063:        BGE_MODECTL_WORDSWAP_NONFRAME,
                   1064:
                   1065:        BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
                   1066:        BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
                   1067:        BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
                   1068:
                   1069:        BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
                   1070:        BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
                   1071:
                   1072:        BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
                   1073:
                   1074:        BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
                   1075:            BGE_MODECTL_BYTESWAP_NONFRAME,
                   1076:        BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
                   1077:            BGE_MODECTL_WORDSWAP_NONFRAME,
                   1078:        BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
                   1079:            BGE_MODECTL_WORDSWAP_NONFRAME,
                   1080:        BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
                   1081:            BGE_MODECTL_WORDSWAP_NONFRAME,
                   1082:
                   1083:        BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
                   1084:            BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
                   1085: };
                   1086:
                   1087: int bge_swapindex = 0;
                   1088:
                   1089: /*
                   1090:  * Do endian, PCI and DMA initialization. Also check the on-board ROM
                   1091:  * self-test results.
                   1092:  */
                   1093: int
                   1094: bge_chipinit(sc)
                   1095:        struct bge_softc *sc;
                   1096: {
                   1097:        u_int32_t               cachesize;
                   1098:        int                     i;
                   1099:        u_int32_t               dma_rw_ctl;
                   1100:        struct pci_attach_args  *pa = &(sc->bge_pa);
                   1101:
                   1102:
                   1103:        /* Set endianness before we access any non-PCI registers. */
                   1104:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
                   1105:            BGE_INIT);
                   1106:
                   1107:        /* Set power state to D0. */
                   1108:        bge_setpowerstate(sc, 0);
                   1109:
                   1110:        /*
                   1111:         * Check the 'ROM failed' bit on the RX CPU to see if
                   1112:         * self-tests passed.
                   1113:         */
                   1114:        if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
                   1115:                printf("%s: RX CPU self-diagnostics failed!\n",
                   1116:                    sc->bge_dev.dv_xname);
                   1117:                return(ENODEV);
                   1118:        }
                   1119:
                   1120:        /* Clear the MAC control register */
                   1121:        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
                   1122:
                   1123:        /*
                   1124:         * Clear the MAC statistics block in the NIC's
                   1125:         * internal memory.
                   1126:         */
                   1127:        for (i = BGE_STATS_BLOCK;
                   1128:            i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
                   1129:                BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
                   1130:
                   1131:        for (i = BGE_STATUS_BLOCK;
                   1132:            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
                   1133:                BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
                   1134:
                   1135:        /* Set up the PCI DMA control register. */
                   1136:        if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) &
                   1137:            BGE_PCISTATE_PCI_BUSMODE) {
                   1138:                /* Conventional PCI bus */
                   1139:                DPRINTFN(4, ("(%s: PCI 2.2 dma setting)\n", sc->bge_dev.dv_xname));
                   1140:                dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
                   1141:                   (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                   1142:                   (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                   1143:                   (0x0F));
                   1144:        } else {
                    1145:                DPRINTFN(4, ("(%s: PCI-X dma setting)\n", sc->bge_dev.dv_xname));
                   1146:                /* PCI-X bus */
                   1147:                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                   1148:                    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                   1149:                    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                   1150:                    (0x0F);
                   1151:                /*
                   1152:                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                   1153:                 * for hardware bugs, which means we should also clear
                   1154:                 * the low-order MINDMA bits.  In addition, the 5704
                   1155:                 * uses a different encoding of read/write watermarks.
                   1156:                 */
                   1157:                if (sc->bge_asicrev == BGE_ASICREV_BCM5704_A0) {
                   1158:                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                   1159:                          /* should be 0x1f0000 */
                   1160:                          (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                   1161:                          (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                   1162:                        dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                   1163:                }
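                                         /*
                                          * Match any 5703 stepping by comparing only the high
                                          * nibble of the ASIC revision.
                                          */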
                   1164:                else if ((sc->bge_asicrev >> 28) ==
                   1165:                         (BGE_ASICREV_BCM5703_A0 >> 28)) {
                   1166:                        dma_rw_ctl &=  0xfffffff0;
                   1167:                        dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                   1168:                }
                   1169:        }
                   1170:
                   1171:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
                   1172:
                   1173:        /*
                   1174:         * Set up general mode register.
                   1175:         */
                   1176:        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
                   1177:                    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
                   1178:                    BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM|
                   1179:                    BGE_MODECTL_RX_NO_PHDR_CSUM);
                   1180:
                   1181:        /* Get cache line size. */
                   1182:        cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
                   1183:
                   1184:        /*
                   1185:         * Avoid violating PCI spec on certain chip revs.
                   1186:         */
                   1187:        if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
                   1188:            PCIM_CMD_MWIEN) {
                   1189:                switch(cachesize) {
                   1190:                case 1:
                   1191:                        PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
                   1192:                                   BGE_PCI_WRITE_BNDRY_16BYTES);
                   1193:                        break;
                   1194:                case 2:
                   1195:                        PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
                   1196:                                   BGE_PCI_WRITE_BNDRY_32BYTES);
                   1197:                        break;
                   1198:                case 4:
                   1199:                        PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
                   1200:                                   BGE_PCI_WRITE_BNDRY_64BYTES);
                   1201:                        break;
                   1202:                case 8:
                   1203:                        PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
                   1204:                                   BGE_PCI_WRITE_BNDRY_128BYTES);
                   1205:                        break;
                   1206:                case 16:
                   1207:                        PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
                   1208:                                   BGE_PCI_WRITE_BNDRY_256BYTES);
                   1209:                        break;
                   1210:                case 32:
                   1211:                        PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
                   1212:                                   BGE_PCI_WRITE_BNDRY_512BYTES);
                   1213:                        break;
                   1214:                case 64:
                   1215:                        PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
                   1216:                                   BGE_PCI_WRITE_BNDRY_1024BYTES);
                   1217:                        break;
                   1218:                default:
                   1219:                /* Disable PCI memory write and invalidate. */
                   1220: #if 0
                   1221:                        if (bootverbose)
                   1222:                                printf("%s: cache line size %d not "
                   1223:                                    "supported; disabling PCI MWI\n",
                   1224:                                    sc->bge_dev.dv_xname, cachesize);
                   1225: #endif
                   1226:                        PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
                   1227:                            PCIM_CMD_MWIEN);
                   1228:                        break;
                   1229:                }
                   1230:        }
                   1231:
                   1232:        /*
                   1233:         * Disable memory write invalidate.  Apparently it is not supported
                   1234:         * properly by these devices.
                   1235:         */
                   1236:        PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);
                   1237:
                   1238:
                   1239: #ifdef __brokenalpha__
                   1240:        /*
                    1241:         * Must ensure that we do not cross an 8K (byte) boundary
                   1242:         * for DMA reads.  Our highest limit is 1K bytes.  This is a
                   1243:         * restriction on some ALPHA platforms with early revision
                   1244:         * 21174 PCI chipsets, such as the AlphaPC 164lx
                   1245:         */
                   1246:        PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
                   1247: #endif
                   1248:
                    1249:        /* Set the timer prescaler (always 66MHz) */
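                                 /*
                                  * The prescaler field starts at bit 1 and holds the core
                                  * clock in MHz minus one, hence 65 << 1 for a 66MHz clock.
                                  */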
                   1250:        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
                   1251:
                   1252:        return(0);
                   1253: }
                   1254:
                   1255: int
                   1256: bge_blockinit(sc)
                   1257:        struct bge_softc *sc;
                   1258: {
1.29.2.4  grant    1259:        volatile struct bge_rcb         *rcb;
1.29.2.2  jmc      1260:        bus_size_t              rcb_addr;
                   1261:        int                     i;
                   1262:        struct ifnet            *ifp = &sc->ethercom.ec_if;
                   1263:        bge_hostaddr            taddr;
                   1264:
                   1265:        /*
                   1266:         * Initialize the memory window pointer register so that
                   1267:         * we can access the first 32K of internal NIC RAM. This will
                   1268:         * allow us to set up the TX send ring RCBs and the RX return
                   1269:         * ring RCBs, plus other things which live in NIC memory.
                   1270:         */
                   1271:
                   1272:        pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
                   1273:            BGE_PCI_MEMWIN_BASEADDR, 0);
                   1274:
                   1275:        /* Configure mbuf memory pool */
                   1276:        if (sc->bge_extram) {
                   1277:                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM);
                   1278:                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                   1279:        } else {
                   1280:                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
                   1281:                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                   1282:        }
                   1283:
                   1284:        /* Configure DMA resource pool */
                   1285:        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS);
                   1286:        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
                   1287:
                   1288:        /* Configure mbuf pool watermarks */
                   1289: #ifdef ORIG_WPAUL_VALUES
                   1290:        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
                   1291:        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
                   1292:        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
                   1293: #else
                    1294:        /* New Broadcom docs strongly recommend these: */
                   1295:        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
                   1296:        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
                   1297:        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
                   1298: #endif
                   1299:
                   1300:        /* Configure DMA resource watermarks */
                   1301:        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
                   1302:        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
                   1303:
                   1304:        /* Enable buffer manager */
                   1305:        CSR_WRITE_4(sc, BGE_BMAN_MODE,
                   1306:            BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
                   1307:
                   1308:        /* Poll for buffer manager start indication */
                   1309:        for (i = 0; i < BGE_TIMEOUT; i++) {
                   1310:                if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                   1311:                        break;
                   1312:                DELAY(10);
                   1313:        }
                   1314:
                   1315:        if (i == BGE_TIMEOUT) {
                   1316:                printf("%s: buffer manager failed to start\n",
                   1317:                    sc->bge_dev.dv_xname);
                   1318:                return(ENXIO);
                   1319:        }
                   1320:
                   1321:        /* Enable flow-through queues */
                   1322:        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
                   1323:        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
                   1324:
                   1325:        /* Wait until queue initialization is complete */
                   1326:        for (i = 0; i < BGE_TIMEOUT; i++) {
                   1327:                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                   1328:                        break;
                   1329:                DELAY(10);
                   1330:        }
                   1331:
                   1332:        if (i == BGE_TIMEOUT) {
                   1333:                printf("%s: flow-through queue init failed\n",
                   1334:                    sc->bge_dev.dv_xname);
                   1335:                return(ENXIO);
                   1336:        }
                   1337:
                   1338:        /* Initialize the standard RX ring control block */
                   1339:        rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
                   1340:        bge_set_hostaddr(&rcb->bge_hostaddr,
                   1341:            BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1.29.2.4  grant    1342:        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1.29.2.2  jmc      1343:        if (sc->bge_extram)
                   1344:                rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
                   1345:        else
                   1346:                rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1.29.2.4  grant    1347:        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
                   1348:        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
                   1349:        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
                   1350:        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1.29.2.2  jmc      1351:
                   1352:        /*
                   1353:         * Initialize the jumbo RX ring control block
                   1354:         * We set the 'ring disabled' bit in the flags
                   1355:         * field until we're actually ready to start
                   1356:         * using this ring (i.e. once we set the MTU
                   1357:         * high enough to require it).
                   1358:         */
                   1359:        rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
                   1360:        bge_set_hostaddr(&rcb->bge_hostaddr,
                   1361:            BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1.29.2.4  grant    1362:        rcb->bge_maxlen_flags =
                   1363:           BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED);
1.29.2.2  jmc      1364:        if (sc->bge_extram)
                   1365:                rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
                   1366:        else
                   1367:                rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
                   1368:
1.29.2.4  grant    1369:        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
                   1370:        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
                   1371:        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
                   1372:        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1.29.2.2  jmc      1373:
                   1374:        /* Set up dummy disabled mini ring RCB */
                   1375:        rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1.29.2.4  grant    1376:        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
                   1377:            BGE_RCB_FLAG_RING_DISABLED);
                   1378:        CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1.29.2.2  jmc      1379:
                   1380:        bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
                   1381:            offsetof(struct bge_ring_data, bge_info), sizeof (struct bge_gib),
                   1382:            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
                   1383:
                   1384:        /*
                    1385:         * Set the BD ring replenish thresholds. The recommended
                   1386:         * values are 1/8th the number of descriptors allocated to
                   1387:         * each ring.
                   1388:         */
                   1389:        CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
                   1390:        CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
                   1391:
                   1392:        /*
                   1393:         * Disable all unused send rings by setting the 'ring disabled'
                   1394:         * bit in the flags field of all the TX send ring control blocks.
                   1395:         * These are located in NIC memory.
                   1396:         */
                   1397:        rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
                   1398:        for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1.29.2.4  grant    1399:                RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
                   1400:                    BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
1.29.2.2  jmc      1401:                RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
                   1402:                rcb_addr += sizeof(struct bge_rcb);
                   1403:        }
                   1404:
                   1405:        /* Configure TX RCB 0 (we use only the first ring) */
                   1406:        rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
                   1407:        bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
                   1408:        RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
                   1409:        RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
                   1410:        RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
                   1411:                    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1.29.2.4  grant    1412:        RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
                   1413:            BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1.29.2.2  jmc      1414:
                   1415:        /* Disable all unused RX return rings */
                   1416:        rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
                   1417:        for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
                   1418:                RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
                   1419:                RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1.29.2.4  grant    1420:                RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
                   1421:                            BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,
                   1422:                                      BGE_RCB_FLAG_RING_DISABLED));
1.29.2.2  jmc      1423:                RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
                   1424:                CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
                   1425:                    (i * (sizeof(u_int64_t))), 0);
                   1426:                rcb_addr += sizeof(struct bge_rcb);
                   1427:        }
                   1428:
                   1429:        /* Initialize RX ring indexes */
                   1430:        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
                   1431:        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
                   1432:        CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
                   1433:
                   1434:        /*
                   1435:         * Set up RX return ring 0
                   1436:         * Note that the NIC address for RX return rings is 0x00000000.
                   1437:         * The return rings live entirely within the host, so the
                   1438:         * nicaddr field in the RCB isn't used.
                   1439:         */
                   1440:        rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
                   1441:        bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
                   1442:        RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
                   1443:        RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
                   1444:        RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1.29.2.4  grant    1445:        RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
                   1446:            BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,0));
1.29.2.2  jmc      1447:
                   1448:        /* Set random backoff seed for TX */
                   1449:        CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
                   1450:            LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
                   1451:            LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
                   1452:            LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
                   1453:            BGE_TX_BACKOFF_SEED_MASK);
                   1454:
                   1455:        /* Set inter-packet gap */
                   1456:        CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
                   1457:
                   1458:        /*
                   1459:         * Specify which ring to use for packets that don't match
                   1460:         * any RX rules.
                   1461:         */
                   1462:        CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
                   1463:
                   1464:        /*
                   1465:         * Configure number of RX lists. One interrupt distribution
                   1466:         * list, sixteen active lists, one bad frames class.
                   1467:         */
                   1468:        CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
                   1469:
                    1470:        /* Initialize RX list placement stats mask. */
                   1471:        CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
                   1472:        CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
                   1473:
                   1474:        /* Disable host coalescing until we get it set up */
                   1475:        CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
                   1476:
                   1477:        /* Poll to make sure it's shut down. */
                   1478:        for (i = 0; i < BGE_TIMEOUT; i++) {
                   1479:                if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
                   1480:                        break;
                   1481:                DELAY(10);
                   1482:        }
                   1483:
                   1484:        if (i == BGE_TIMEOUT) {
                   1485:                printf("%s: host coalescing engine failed to idle\n",
                   1486:                    sc->bge_dev.dv_xname);
                   1487:                return(ENXIO);
                   1488:        }
                   1489:
                   1490:        /* Set up host coalescing defaults */
                   1491:        CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
                   1492:        CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
                   1493:        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
                   1494:        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
                   1495:        CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
                   1496:        CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
                   1497:        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
                   1498:        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
                   1499:        CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
                   1500:
                   1501:        /* Set up address of statistics block */
                   1502:        bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
                   1503:        CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
                   1504:        CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
                   1505:        CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
                   1506:
                   1507:        /* Set up address of status block */
                   1508:        bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
                   1509:        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
                   1510:        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
                   1511:        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
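                                 /* Zero the initial RX producer / TX consumer indices in the status block. */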
                   1512:        sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
                   1513:        sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
                   1514:
                   1515:        /* Turn on host coalescing state machine */
                   1516:        CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
                   1517:
                   1518:        /* Turn on RX BD completion state machine and enable attentions */
                   1519:        CSR_WRITE_4(sc, BGE_RBDC_MODE,
                   1520:            BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
                   1521:
                   1522:        /* Turn on RX list placement state machine */
                   1523:        CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
                   1524:
                   1525:        /* Turn on RX list selector state machine. */
                   1526:        CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
                   1527:
                   1528:        /* Turn on DMA, clear stats */
                   1529:        CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
                   1530:            BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
                   1531:            BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
                   1532:            BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
                   1533:            (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
                   1534:
                   1535:        /* Set misc. local control, enable interrupts on attentions */
                   1536:        sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
                   1537:
                   1538: #ifdef notdef
                   1539:        /* Assert GPIO pins for PHY reset */
                   1540:        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
                   1541:            BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
                   1542:        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
                   1543:            BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
                   1544: #endif
                   1545:
                   1546: #if defined(not_quite_yet)
                    1547:        /* The Linux driver enables GPIO pin #1 on 5700s */
                   1548:        if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
                   1549:                sc->bge_local_ctrl_reg |=
                   1550:                  (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
                   1551:        }
                   1552: #endif
                   1553:        CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
                   1554:
                   1555:        /* Turn on DMA completion state machine */
                   1556:        CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
                   1557:
                   1558:        /* Turn on write DMA state machine */
                   1559:        CSR_WRITE_4(sc, BGE_WDMA_MODE,
                   1560:            BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
                   1561:
                   1562:        /* Turn on read DMA state machine */
                   1563:        CSR_WRITE_4(sc, BGE_RDMA_MODE,
                   1564:            BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
                   1565:
                   1566:        /* Turn on RX data completion state machine */
                   1567:        CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
                   1568:
                   1569:        /* Turn on RX BD initiator state machine */
                   1570:        CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
                   1571:
                   1572:        /* Turn on RX data and RX BD initiator state machine */
                   1573:        CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
                   1574:
                   1575:        /* Turn on Mbuf cluster free state machine */
                   1576:        CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
                   1577:
                   1578:        /* Turn on send BD completion state machine */
                   1579:        CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
                   1580:
                   1581:        /* Turn on send data completion state machine */
                   1582:        CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
                   1583:
                   1584:        /* Turn on send data initiator state machine */
                   1585:        CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
                   1586:
                   1587:        /* Turn on send BD initiator state machine */
                   1588:        CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
                   1589:
                   1590:        /* Turn on send BD selector state machine */
                   1591:        CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
                   1592:
                   1593:        CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
                   1594:        CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
                   1595:            BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
                   1596:
                   1597:        /* init LED register */
                   1598:        CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000);
                   1599:
                   1600:        /* ack/clear link change events */
                   1601:        CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
                   1602:            BGE_MACSTAT_CFG_CHANGED);
                   1603:        CSR_WRITE_4(sc, BGE_MI_STS, 0);
                   1604:
                   1605:        /* Enable PHY auto polling (for MII/GMII only) */
                   1606:        if (sc->bge_tbi) {
                   1607:                CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
                   1608:        } else {
                   1609:                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
                   1610:                if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
                   1611:                        CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
                   1612:                            BGE_EVTENB_MI_INTERRUPT);
                   1613:        }
                   1614:
                   1615:        /* Enable link state change attentions. */
                   1616:        BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
                   1617:
                   1618:        return(0);
                   1619: }
                   1620:
                   1621: static const struct bge_revision {
                   1622:        uint32_t                br_asicrev;
                   1623:        uint32_t                br_quirks;
                   1624:        const char              *br_name;
                   1625: } bge_revisions[] = {
                   1626:        { BGE_ASICREV_BCM5700_A0,
                   1627:          BGE_QUIRK_LINK_STATE_BROKEN,
                   1628:          "BCM5700 A0" },
                   1629:
                   1630:        { BGE_ASICREV_BCM5700_A1,
                   1631:          BGE_QUIRK_LINK_STATE_BROKEN,
                   1632:          "BCM5700 A1" },
                   1633:
                   1634:        { BGE_ASICREV_BCM5700_B0,
                   1635:          BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
                   1636:          "BCM5700 B0" },
                   1637:
                   1638:        { BGE_ASICREV_BCM5700_B1,
                   1639:          BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
                   1640:          "BCM5700 B1" },
                   1641:
                   1642:        { BGE_ASICREV_BCM5700_B2,
                   1643:          BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
                   1644:          "BCM5700 B2" },
                   1645:
                   1646:        /* This is treated like a BCM5700 Bx */
                   1647:        { BGE_ASICREV_BCM5700_ALTIMA,
                   1648:          BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
                   1649:          "BCM5700 Altima" },
                   1650:
                   1651:        { BGE_ASICREV_BCM5700_C0,
                   1652:          0,
                   1653:          "BCM5700 C0" },
                   1654:
                   1655:        { BGE_ASICREV_BCM5701_A0,
                   1656:          0,
                   1657:          "BCM5701 A0" },
                   1658:
                   1659:        { BGE_ASICREV_BCM5701_B0,
                   1660:          0,
                   1661:          "BCM5701 B0" },
                   1662:
                   1663:        { BGE_ASICREV_BCM5701_B2,
                   1664:          0,
                   1665:          "BCM5701 B2" },
                   1666:
                   1667:        { BGE_ASICREV_BCM5701_B5,
                   1668:          BGE_QUIRK_ONLY_PHY_1,
                   1669:          "BCM5701 B5" },
                   1670:
                   1671:        { BGE_ASICREV_BCM5703_A0,
                   1672:          0,
                   1673:          "BCM5703 A0" },
                   1674:
                   1675:        { BGE_ASICREV_BCM5703_A1,
                   1676:          0,
                   1677:          "BCM5703 A1" },
                   1678:
                   1679:        { BGE_ASICREV_BCM5703_A2,
                   1680:          BGE_QUIRK_ONLY_PHY_1,
                   1681:          "BCM5703 A2" },
                   1682:
                   1683:        { BGE_ASICREV_BCM5704_A0,
                   1684:          BGE_QUIRK_ONLY_PHY_1,
                   1685:          "BCM5704 A0" },
                   1686:
                   1687:        { 0, 0, NULL }
                   1688: };
                   1689:
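                         /*
                          * Look up the quirks and printable name for a given ASIC revision;
                          * returns NULL if the revision is not in the table above.
                          */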
                   1690: static const struct bge_revision *
                   1691: bge_lookup_rev(uint32_t asicrev)
                   1692: {
                   1693:        const struct bge_revision *br;
                   1694:
                   1695:        for (br = bge_revisions; br->br_name != NULL; br++) {
                   1696:                if (br->br_asicrev == asicrev)
                   1697:                        return (br);
                   1698:        }
                   1699:
                   1700:        return (NULL);
                   1701: }
                   1702:
                   1703: static const struct bge_product {
                   1704:        pci_vendor_id_t         bp_vendor;
                   1705:        pci_product_id_t        bp_product;
                   1706:        const char              *bp_name;
                   1707: } bge_products[] = {
                   1708:        /*
                   1709:         * The BCM5700 documentation seems to indicate that the hardware
                   1710:         * still has the Alteon vendor ID burned into it, though it
                   1711:         * should always be overridden by the value in the EEPROM.  We'll
                   1712:         * check for it anyway.
                   1713:         */
                   1714:        { PCI_VENDOR_ALTEON,
                   1715:          PCI_PRODUCT_ALTEON_BCM5700,
                   1716:          "Broadcom BCM5700 Gigabit Ethernet" },
                   1717:        { PCI_VENDOR_ALTEON,
                   1718:          PCI_PRODUCT_ALTEON_BCM5701,
                   1719:          "Broadcom BCM5701 Gigabit Ethernet" },
                   1720:
                   1721:        { PCI_VENDOR_ALTIMA,
                   1722:          PCI_PRODUCT_ALTIMA_AC1000,
                   1723:          "Altima AC1000 Gigabit Ethernet" },
                   1724:        { PCI_VENDOR_ALTIMA,
                   1725:          PCI_PRODUCT_ALTIMA_AC1001,
                   1726:          "Altima AC1001 Gigabit Ethernet" },
                   1727:        { PCI_VENDOR_ALTIMA,
                   1728:          PCI_PRODUCT_ALTIMA_AC9100,
                   1729:          "Altima AC9100 Gigabit Ethernet" },
                   1730:
                   1731:        { PCI_VENDOR_BROADCOM,
                   1732:          PCI_PRODUCT_BROADCOM_BCM5700,
                   1733:          "Broadcom BCM5700 Gigabit Ethernet" },
                   1734:        { PCI_VENDOR_BROADCOM,
                   1735:          PCI_PRODUCT_BROADCOM_BCM5701,
                   1736:          "Broadcom BCM5701 Gigabit Ethernet" },
                   1737:        { PCI_VENDOR_BROADCOM,
                   1738:          PCI_PRODUCT_BROADCOM_BCM5702,
                   1739:          "Broadcom BCM5702 Gigabit Ethernet" },
                   1740:        { PCI_VENDOR_BROADCOM,
                   1741:          PCI_PRODUCT_BROADCOM_BCM5702X,
                   1742:          "Broadcom BCM5702X Gigabit Ethernet" },
                   1743:        { PCI_VENDOR_BROADCOM,
                   1744:          PCI_PRODUCT_BROADCOM_BCM5703,
                   1745:          "Broadcom BCM5703 Gigabit Ethernet" },
                   1746:        { PCI_VENDOR_BROADCOM,
                   1747:          PCI_PRODUCT_BROADCOM_BCM5703X,
                   1748:          "Broadcom BCM5703X Gigabit Ethernet" },
                   1749:        { PCI_VENDOR_BROADCOM,
                   1750:          PCI_PRODUCT_BROADCOM_BCM5704C,
                   1751:          "Broadcom BCM5704C Dual Gigabit Ethernet" },
                   1752:        { PCI_VENDOR_BROADCOM,
                   1753:          PCI_PRODUCT_BROADCOM_BCM5704S,
                   1754:          "Broadcom BCM5704S Dual Gigabit Ethernet" },
                   1755:
                   1756:
                   1757:        { PCI_VENDOR_SCHNEIDERKOCH,
                   1758:          PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
                   1759:          "SysKonnect SK-9Dx1 Gigabit Ethernet" },
                   1760:
                   1761:        { PCI_VENDOR_3COM,
                   1762:          PCI_PRODUCT_3COM_3C996,
                   1763:          "3Com 3c996 Gigabit Ethernet" },
                   1764:
                   1765:        { 0,
                   1766:          0,
                   1767:          NULL },
                   1768: };
                   1769:
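                         /*
                          * Match the PCI vendor/product ID of the device being attached
                          * against the product table above; returns NULL if it is not a
                          * supported chip.
                          */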
                   1770: static const struct bge_product *
                   1771: bge_lookup(const struct pci_attach_args *pa)
                   1772: {
                   1773:        const struct bge_product *bp;
                   1774:
                   1775:        for (bp = bge_products; bp->bp_name != NULL; bp++) {
                   1776:                if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
                   1777:                    PCI_PRODUCT(pa->pa_id) == bp->bp_product)
                   1778:                        return (bp);
                   1779:        }
                   1780:
                   1781:        return (NULL);
                   1782: }
                   1783:
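                         /*
                          * Set the chip power state. The actual register accesses are under
                          * #ifdef NOTYET, so unless that is defined this simply returns
                          * EOPNOTSUPP; see the comment inside about GPIO wiring on OEM cards.
                          */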
                   1784: int
                   1785: bge_setpowerstate(sc, powerlevel)
                   1786:        struct bge_softc *sc;
                   1787:        int powerlevel;
                   1788: {
                   1789: #ifdef NOTYET
                   1790:        u_int32_t pm_ctl = 0;
                   1791:
                   1792:        /* XXX FIXME: make sure indirect accesses enabled? */
                   1793:        pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
                   1794:        pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
                   1795:        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
                   1796:
                   1797:        /* clear the PME_assert bit and power state bits, enable PME */
                   1798:        pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
                   1799:        pm_ctl &= ~PCIM_PSTAT_DMASK;
                   1800:        pm_ctl |= (1 << 8);
                   1801:
                   1802:        if (powerlevel == 0) {
                   1803:                pm_ctl |= PCIM_PSTAT_D0;
                   1804:                pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
                   1805:                    pm_ctl, 2);
                   1806:                DELAY(10000);
                   1807:                CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
                   1808:                DELAY(10000);
                   1809:
                   1810: #ifdef NOTYET
                   1811:                /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
                   1812:                bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
                   1813: #endif
                   1814:                DELAY(40); DELAY(40); DELAY(40);
                   1815:                DELAY(10000);   /* above not quite adequate on 5700 */
                   1816:                return 0;
                   1817:        }
                   1818:
                   1819:
                   1820:        /*
                   1821:         * Entering ACPI power states D1-D3 is achieved by wiggling
                    1822:         * GMII GPIO pins. The example code assumes all hardware vendors
                    1823:         * followed Broadcom's sample PCB layout. Until we verify that
                    1824:         * for all supported OEM cards, states D1-D3 are unsupported.
                   1825:         */
                   1826:        printf("%s: power state %d unimplemented; check GPIO pins\n",
                   1827:               sc->bge_dev.dv_xname, powerlevel);
                   1828: #endif
                   1829:        return EOPNOTSUPP;
                   1830: }
                   1831:
                   1832:
                   1833: /*
                   1834:  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
                    1835:  * against our list and return a match if we find one. Note
                   1836:  * that since the Broadcom controller contains VPD support, we
                   1837:  * can get the device name string from the controller itself instead
                   1838:  * of the compiled-in string. This is a little slow, but it guarantees
                   1839:  * we'll always announce the right product name.
                   1840:  */
                   1841: int
                   1842: bge_probe(parent, match, aux)
                   1843:        struct device *parent;
                   1844:        struct cfdata *match;
                   1845:        void *aux;
                   1846: {
                   1847:        struct pci_attach_args *pa = (struct pci_attach_args *)aux;
                   1848:
                   1849:        if (bge_lookup(pa) != NULL)
                   1850:                return (1);
                   1851:
                   1852:        return (0);
                   1853: }
                   1854:
                   1855: void
                   1856: bge_attach(parent, self, aux)
                   1857:        struct device *parent, *self;
                   1858:        void *aux;
                   1859: {
                   1860:        struct bge_softc        *sc = (struct bge_softc *)self;
                   1861:        struct pci_attach_args  *pa = aux;
                   1862:        const struct bge_product *bp;
                   1863:        const struct bge_revision *br;
                   1864:        pci_chipset_tag_t       pc = pa->pa_pc;
                   1865:        pci_intr_handle_t       ih;
                   1866:        const char              *intrstr = NULL;
                   1867:        bus_dma_segment_t       seg;
                   1868:        int                     rseg;
                   1869:        u_int32_t               hwcfg = 0;
                   1870:        u_int32_t               mac_addr = 0;
                   1871:        u_int32_t               command;
                   1872:        struct ifnet            *ifp;
                   1873:        caddr_t                 kva;
                   1874:        u_char                  eaddr[ETHER_ADDR_LEN];
                   1875:        pcireg_t                memtype;
                   1876:        bus_addr_t              memaddr;
                   1877:        bus_size_t              memsize;
                   1878:        u_int32_t               pm_ctl;
                   1879:
                   1880:        bp = bge_lookup(pa);
                   1881:        KASSERT(bp != NULL);
                   1882:
                   1883:        sc->bge_pa = *pa;
                   1884:
                   1885:        printf(": %s\n", bp->bp_name);
                   1886:
                   1887:        /*
                   1888:         * Map control/status registers.
                   1889:         */
                   1890:        DPRINTFN(5, ("Map control/status regs\n"));
                   1891:        command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
                   1892:        command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
                   1893:        pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
                   1894:        command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
                   1895:
                   1896:        if (!(command & PCI_COMMAND_MEM_ENABLE)) {
                   1897:                printf("%s: failed to enable memory mapping!\n",
                   1898:                    sc->bge_dev.dv_xname);
                   1899:                return;
                   1900:        }
                   1901:
                   1902:        DPRINTFN(5, ("pci_mem_find\n"));
                   1903:        memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
                   1904:        switch (memtype) {
                   1905:         case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
                   1906:         case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
                   1907:                if (pci_mapreg_map(pa, BGE_PCI_BAR0,
                   1908:                     memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
                   1909:                    &memaddr, &memsize) == 0)
                   1910:                        break;
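                                         /* FALLTHROUGH: mapping failed */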
                   1911:        default:
                   1912:                printf("%s: can't find mem space\n",
                   1913:                    sc->bge_dev.dv_xname);
                   1914:                return;
                   1915:        }
                   1916:
                   1917:        DPRINTFN(5, ("pci_intr_map\n"));
                   1918:        if (pci_intr_map(pa, &ih)) {
                   1919:                printf("%s: couldn't map interrupt\n",
                   1920:                    sc->bge_dev.dv_xname);
                   1921:                return;
                   1922:        }
                   1923:
                   1924:        DPRINTFN(5, ("pci_intr_string\n"));
                   1925:        intrstr = pci_intr_string(pc, ih);
                   1926:
                   1927:        DPRINTFN(5, ("pci_intr_establish\n"));
                   1928:        sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
                   1929:
                   1930:        if (sc->bge_intrhand == NULL) {
                   1931:                printf("%s: couldn't establish interrupt",
                   1932:                    sc->bge_dev.dv_xname);
                   1933:                if (intrstr != NULL)
                   1934:                        printf(" at %s", intrstr);
                   1935:                printf("\n");
                   1936:                return;
                   1937:        }
                   1938:        printf("%s: interrupting at %s\n", sc->bge_dev.dv_xname, intrstr);
                   1939:
                   1940:        /*
                   1941:         * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
                   1942:         * can clobber the chip's PCI config-space power control registers,
                   1943:         * leaving the card in D3 powersave state.
                   1944:         * We do not have memory-mapped registers in this state,
                   1945:         * so force device into D0 state before starting initialization.
                   1946:         */
                   1947:        pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
                   1948:        pm_ctl &= ~7;
                   1949:        pm_ctl |= (1 << 8); /* D0 state */
                   1950:        pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
                   1951:        DELAY(1000);    /* 27 usec is allegedly sufficient */
                   1952:
                   1953:        /* Try to reset the chip. */
                   1954:        DPRINTFN(5, ("bge_reset\n"));
                   1955:        bge_reset(sc);
                   1956:
                   1957:        if (bge_chipinit(sc)) {
                   1958:                printf("%s: chip initialization failed\n",
                   1959:                    sc->bge_dev.dv_xname);
                   1960:                bge_release_resources(sc);
                   1961:                return;
                   1962:        }
                   1963:
                   1964:        /*
                   1965:         * Get the station address from NIC memory or, failing that, the EEPROM.
                   1966:         */
                   1967:        mac_addr = bge_readmem_ind(sc, 0x0c14);
                   1968:        if ((mac_addr >> 16) == 0x484b) {
                   1969:                eaddr[0] = (u_char)(mac_addr >> 8);
                   1970:                eaddr[1] = (u_char)(mac_addr >> 0);
                   1971:                mac_addr = bge_readmem_ind(sc, 0x0c18);
                   1972:                eaddr[2] = (u_char)(mac_addr >> 24);
                   1973:                eaddr[3] = (u_char)(mac_addr >> 16);
                   1974:                eaddr[4] = (u_char)(mac_addr >> 8);
                   1975:                eaddr[5] = (u_char)(mac_addr >> 0);
                   1976:        } else if (bge_read_eeprom(sc, (caddr_t)eaddr,
                   1977:            BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
                   1978:                printf("%s: failed to read station address\n",
                   1979:                    sc->bge_dev.dv_xname);
                   1980:                bge_release_resources(sc);
                   1981:                return;
                   1982:        }
                   1983:
                   1984:        /*
                   1985:         * Save ASIC rev.  Look up any quirks associated with this
                   1986:         * ASIC.
                   1987:         */
                   1988:        sc->bge_asicrev =
                   1989:            pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
                   1990:            BGE_PCIMISCCTL_ASICREV;
                   1991:        br = bge_lookup_rev(sc->bge_asicrev);
                   1992:
                   1993:        printf("%s: ", sc->bge_dev.dv_xname);
                   1994:        if (br == NULL) {
                   1995:                printf("unknown ASIC 0x%08x", sc->bge_asicrev);
                   1996:                sc->bge_quirks = 0;
                   1997:        } else {
                   1998:                printf("ASIC %s", br->br_name);
                   1999:                sc->bge_quirks = br->br_quirks;
                   2000:        }
                   2001:        printf(", Ethernet address %s\n", ether_sprintf(eaddr));
                   2002:
                   2003:        /* Allocate the general information block and ring buffers. */
                   2004:        sc->bge_dmatag = pa->pa_dmat;
                   2005:        DPRINTFN(5, ("bus_dmamem_alloc\n"));
                   2006:        if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
                   2007:                             PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
                   2008:                printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
                   2009:                return;
                   2010:        }
                   2011:        DPRINTFN(5, ("bus_dmamem_map\n"));
                   2012:        if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
                   2013:                           sizeof(struct bge_ring_data), &kva,
                   2014:                           BUS_DMA_NOWAIT)) {
                   2015:                printf("%s: can't map dma buffers (%d bytes)\n",
                   2016:                    sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
                   2017:                bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
                   2018:                return;
                   2019:        }
                   2020:        DPRINTFN(5, ("bus_dmamap_create\n"));
                   2021:        if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
                   2022:            sizeof(struct bge_ring_data), 0,
                   2023:            BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
                   2024:                printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
                   2025:                bus_dmamem_unmap(sc->bge_dmatag, kva,
                   2026:                                 sizeof(struct bge_ring_data));
                   2027:                bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
                   2028:                return;
                   2029:        }
                   2030:        DPRINTFN(5, ("bus_dmamap_load\n"));
                   2031:        if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
                   2032:                            sizeof(struct bge_ring_data), NULL,
                   2033:                            BUS_DMA_NOWAIT)) {
                   2034:                bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
                   2035:                bus_dmamem_unmap(sc->bge_dmatag, kva,
                   2036:                                 sizeof(struct bge_ring_data));
                   2037:                bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
                   2038:                return;
                   2039:        }
                   2040:
                   2041:        DPRINTFN(5, ("memset\n"));
                   2042:        sc->bge_rdata = (struct bge_ring_data *)kva;
                   2043:
                   2044:        memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
                   2045:
                   2046:        /* Try to allocate memory for jumbo buffers. */
                   2047:        if (bge_alloc_jumbo_mem(sc)) {
                   2048:                printf("%s: jumbo buffer allocation failed\n",
                   2049:                    sc->bge_dev.dv_xname);
                   2050:        } else
                   2051:                sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
                   2052:
                   2053:        /* Set default tuneable values. */
                   2054:        sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
                   2055:        sc->bge_rx_coal_ticks = 150;
                   2056:        sc->bge_rx_max_coal_bds = 64;
                   2057: #ifdef ORIG_WPAUL_VALUES
                   2058:        sc->bge_tx_coal_ticks = 150;
                   2059:        sc->bge_tx_max_coal_bds = 128;
                   2060: #else
                   2061:        sc->bge_tx_coal_ticks = 300;
                   2062:        sc->bge_tx_max_coal_bds = 400;
                   2063: #endif
                   2064:
                   2065:        /* Set up ifnet structure */
                   2066:        ifp = &sc->ethercom.ec_if;
                   2067:        ifp->if_softc = sc;
                   2068:        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
                   2069:        ifp->if_ioctl = bge_ioctl;
                   2070:        ifp->if_start = bge_start;
                   2071:        ifp->if_init = bge_init;
                   2072:        ifp->if_watchdog = bge_watchdog;
                   2073:        IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
                   2074:        IFQ_SET_READY(&ifp->if_snd);
                   2075:        DPRINTFN(5, ("strcpy\n"));
                   2076:        strcpy(ifp->if_xname, sc->bge_dev.dv_xname);
                   2077:
                   2078:        if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
                   2079:                sc->ethercom.ec_if.if_capabilities |=
                   2080:                    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
                   2081:        sc->ethercom.ec_capabilities |=
                   2082:            ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
                   2083:
                   2084:        /*
                   2085:         * Do MII setup.
                   2086:         */
                   2087:        DPRINTFN(5, ("mii setup\n"));
                   2088:        sc->bge_mii.mii_ifp = ifp;
                   2089:        sc->bge_mii.mii_readreg = bge_miibus_readreg;
                   2090:        sc->bge_mii.mii_writereg = bge_miibus_writereg;
                   2091:        sc->bge_mii.mii_statchg = bge_miibus_statchg;
                   2092:
                   2093:        /*
                   2094:         * Figure out what sort of media we have by checking the
1.29.2.5! grant    2095:         * hardware config word in the first 32k of NIC internal memory,
        !          2096:         * or fall back to the config word in the EEPROM. Note: on some BCM5700
1.29.2.2  jmc      2097:         * cards, this value appears to be unset. If that's the
                   2098:         * case, we have to rely on identifying the NIC by its PCI
                   2099:         * subsystem ID, as we do below for the SysKonnect SK-9D41.
                   2100:         */
1.29.2.5! grant    2101:        if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
        !          2102:                hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
        !          2103:        } else {
        !          2104:                bge_read_eeprom(sc, (caddr_t)&hwcfg,
1.29.2.2  jmc      2105:                    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1.29.2.5! grant    2106:                hwcfg = be32toh(hwcfg);
        !          2107:        }
        !          2108:        if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1.29.2.2  jmc      2109:                sc->bge_tbi = 1;
                   2110:
                   2111:        /* The SysKonnect SK-9D41 is a 1000baseSX card. */
                   2112:        if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
                   2113:            SK_SUBSYSID_9D41)
                   2114:                sc->bge_tbi = 1;
                   2115:
                   2116:        if (sc->bge_tbi) {
                   2117:                ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
                   2118:                    bge_ifmedia_sts);
                   2119:                ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
                   2120:                ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
                   2121:                            0, NULL);
                   2122:                ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
                   2123:                ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
                   2124:        } else {
                   2125:                /*
                   2126:                 * Do transceiver setup.
                   2127:                 */
                   2128:                ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
                   2129:                             bge_ifmedia_sts);
                   2130:                mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
                   2131:                           MII_PHY_ANY, MII_OFFSET_ANY, 0);
                   2132:
                   2133:                if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
                   2134:                        printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
                   2135:                        ifmedia_add(&sc->bge_mii.mii_media,
                   2136:                                    IFM_ETHER|IFM_MANUAL, 0, NULL);
                   2137:                        ifmedia_set(&sc->bge_mii.mii_media,
                   2138:                                    IFM_ETHER|IFM_MANUAL);
                   2139:                } else
                   2140:                        ifmedia_set(&sc->bge_mii.mii_media,
                   2141:                                    IFM_ETHER|IFM_AUTO);
                   2142:        }
                   2143:
                   2144:        /*
                   2145:         * Call MI attach routine.
                   2146:         */
                   2147:        DPRINTFN(5, ("if_attach\n"));
                   2148:        if_attach(ifp);
                   2149:        DPRINTFN(5, ("ether_ifattach\n"));
                   2150:        ether_ifattach(ifp, eaddr);
                   2151:        DPRINTFN(5, ("callout_init\n"));
                   2152:        callout_init(&sc->bge_timeout);
                   2153: }
                   2154:
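                         /*
                          * Release resources allocated at attach time; at present this
                          * only frees the VPD strings, if any were read.
                          */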
                   2155: void
                   2156: bge_release_resources(sc)
                   2157:        struct bge_softc *sc;
                   2158: {
                   2159:        if (sc->bge_vpd_prodname != NULL)
                   2160:                free(sc->bge_vpd_prodname, M_DEVBUF);
                   2161:
                   2162:        if (sc->bge_vpd_readonly != NULL)
                   2163:                free(sc->bge_vpd_readonly, M_DEVBUF);
                   2164: }
                   2165:
                   2166: void
                   2167: bge_reset(sc)
                   2168:        struct bge_softc *sc;
                   2169: {
                   2170:        struct pci_attach_args *pa = &sc->bge_pa;
                   2171:        u_int32_t cachesize, command, pcistate;
                   2172:        int i, val = 0;
                   2173:
                   2174:        /* Save some important PCI state. */
                   2175:        cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
                   2176:        command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
                   2177:        pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
                   2178:
                   2179:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
                   2180:            BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
                   2181:            BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
                   2182:
                   2183:        /* Issue global reset */
                   2184:        bge_writereg_ind(sc, BGE_MISC_CFG,
                   2185:            BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
                   2186:
                   2187:        DELAY(1000);
                   2188:
                   2189:        /* Reset some of the PCI state that got zapped by reset */
                   2190:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
                   2191:            BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
                   2192:            BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
                   2193:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
                   2194:        pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
                   2195:        bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
                   2196:
                   2197:        /* Enable memory arbiter. */
                   2198:        CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
                   2199:
                   2200:        /*
                   2201:         * Prevent PXE restart: write a magic number to the
                   2202:         * general communications memory at 0xB50.
                   2203:         */
                   2204:        bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
                   2205:
                   2206:        /*
                   2207:         * Poll the value location we just wrote until
                   2208:         * we see the 1's complement of the magic number.
                   2209:         * This indicates that the firmware initialization
                   2210:         * is complete.
                   2211:         */
                   2212:        for (i = 0; i < 750; i++) {
                   2213:                val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
                   2214:                if (val == ~BGE_MAGIC_NUMBER)
                   2215:                        break;
                   2216:                DELAY(1000);
                   2217:        }
                   2218:
                   2219:        if (i == 750) {
                   2220:                printf("%s: firmware handshake timed out, val = %x\n",
                   2221:                    sc->bge_dev.dv_xname, val);
                   2222:                return;
                   2223:        }
                   2224:
                   2225:        /*
                   2226:         * XXX Wait for the value of the PCISTATE register to
                   2227:         * return to its original pre-reset state. This is a
                   2228:         * fairly good indicator of reset completion. If we don't
                   2229:         * wait for the reset to fully complete, trying to read
                   2230:         * from the device's non-PCI registers may yield garbage
                   2231:         * results.
                   2232:         */
                   2233:        for (i = 0; i < BGE_TIMEOUT; i++) {
                   2234:                if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) ==
                   2235:                    pcistate)
                   2236:                        break;
                   2237:                DELAY(10);
                   2238:        }
                   2239:
                   2240:        /* Enable memory arbiter. */
                   2241:        CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
                   2242:
                   2243:        /* Fix up byte swapping */
                   2244:        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
                   2245:
                   2246:        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
                   2247:
                   2248:        DELAY(10000);
                   2249: }
                   2250:
                   2251: /*
                   2252:  * Frame reception handling. This is called if there's a frame
                   2253:  * on the receive return list.
                   2254:  *
                   2255:  * Note: we have to be able to handle two possibilities here:
                   2256:  * 1) the frame is from the jumbo receive ring
                   2257:  * 2) the frame is from the standard receive ring
                   2258:  */
                   2259:
                   2260: void
                   2261: bge_rxeof(sc)
                   2262:        struct bge_softc *sc;
                   2263: {
                   2264:        struct ifnet *ifp;
                   2265:        int stdcnt = 0, jumbocnt = 0;
                   2266:        int have_tag = 0;
                   2267:        u_int16_t vlan_tag = 0;
                   2268:        bus_dmamap_t dmamap;
                   2269:        bus_addr_t offset, toff;
                   2270:        bus_size_t tlen;
                   2271:        int tosync;
                   2272:
                   2273:        ifp = &sc->ethercom.ec_if;
                   2274:
                   2275:        bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
                   2276:            offsetof(struct bge_ring_data, bge_status_block),
                   2277:            sizeof (struct bge_status_block),
                   2278:            BUS_DMASYNC_POSTREAD);
                   2279:
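                                 /*
                                  * Sync the part of the RX return ring holding descriptors the
                                  * NIC has completed since our last pass; if the producer index
                                  * has wrapped, the region is synced in two pieces.
                                  */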
                   2280:        offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
                   2281:        tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
                   2282:            sc->bge_rx_saved_considx;
                   2283:
                   2284:        toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
                   2285:
                   2286:        if (tosync < 0) {
                   2287:                tlen = (BGE_RETURN_RING_CNT - sc->bge_rx_saved_considx) *
                   2288:                    sizeof (struct bge_rx_bd);
                   2289:                bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
                   2290:                    toff, tlen, BUS_DMASYNC_POSTREAD);
                   2291:                tosync = -tosync;
                   2292:        }
                   2293:
                   2294:        bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
                   2295:            offset, tosync * sizeof (struct bge_rx_bd),
                   2296:            BUS_DMASYNC_POSTREAD);
                   2297:
                   2298:        while(sc->bge_rx_saved_considx !=
                   2299:            sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
                   2300:                struct bge_rx_bd        *cur_rx;
                   2301:                u_int32_t               rxidx;
                   2302:                struct mbuf             *m = NULL;
                   2303:
                   2304:                cur_rx = &sc->bge_rdata->
                   2305:                        bge_rx_return_ring[sc->bge_rx_saved_considx];
                   2306:
                   2307:                rxidx = cur_rx->bge_idx;
                   2308:                BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);
                   2309:
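                                         /* Note the VLAN tag, if the chip stripped one from the frame. */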
                   2310:                if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
                   2311:                        have_tag = 1;
                   2312:                        vlan_tag = cur_rx->bge_vlan_tag;
                   2313:                }
                   2314:
                   2315:                if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
                   2316:                        BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
                   2317:                        m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
                   2318:                        sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
                   2319:                        jumbocnt++;
                   2320:                        if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
                   2321:                                ifp->if_ierrors++;
                   2322:                                bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
                   2323:                                continue;
                   2324:                        }
                   2325:                        if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
                   2326:                                             NULL)== ENOBUFS) {
                   2327:                                ifp->if_ierrors++;
                   2328:                                bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
                   2329:                                continue;
                   2330:                        }
                   2331:                } else {
                   2332:                        BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
                   2333:                        m = sc->bge_cdata.bge_rx_std_chain[rxidx];
                   2334:                        sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
                   2335:                        stdcnt++;
                   2336:                        dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
                   2337:                        sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
                   2338:                        if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
                   2339:                                ifp->if_ierrors++;
                   2340:                                bge_newbuf_std(sc, sc->bge_std, m, dmamap);
                   2341:                                continue;
                   2342:                        }
                   2343:                        if (bge_newbuf_std(sc, sc->bge_std,
                   2344:                            NULL, dmamap) == ENOBUFS) {
                   2345:                                ifp->if_ierrors++;
                   2346:                                bge_newbuf_std(sc, sc->bge_std, m, dmamap);
                   2347:                                continue;
                   2348:                        }
                   2349:                }
                   2350:
                   2351:                ifp->if_ipackets++;
                   2352:                m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
                   2353:                m->m_pkthdr.rcvif = ifp;
                   2354:
                   2355: #if NBPFILTER > 0
                   2356:                /*
                   2357:                 * Handle BPF listeners. Let the BPF user see the packet.
                   2358:                 */
                   2359:                if (ifp->if_bpf)
                   2360:                        bpf_mtap(ifp->if_bpf, m);
                   2361: #endif
                   2362:
                   2363:                if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) {
                   2364:                        m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
                   2365:                        if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
                   2366:                                m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
                   2367: #if 0  /* XXX appears to be broken */
                   2368:                        if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
                   2369:                                m->m_pkthdr.csum_data =
                   2370:                                    cur_rx->bge_tcp_udp_csum;
                   2371:                                m->m_pkthdr.csum_flags |=
                   2372:                                    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_DATA);
                   2373:                        }
                   2374: #endif
                   2375:                }
                   2376:
                   2377:                /*
                   2378:                 * If we received a packet with a vlan tag, pass it
                   2379:                 * to vlan_input() instead of ether_input().
                   2380:                 */
                   2381:                if (have_tag) {
                   2382:                        struct mbuf *n;
                   2383:
                   2384:                        n = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
                   2385:                        if (n != NULL) {
                   2386:                                *mtod(n, int *) = vlan_tag;
                   2387:                                n->m_len = sizeof(int);
                   2388:                                have_tag = vlan_tag = 0;
                   2389:                        } else {
                   2390:                                printf("%s: no mbuf for tag\n", ifp->if_xname);
                   2391:                                m_freem(m);
                   2392:                                have_tag = vlan_tag = 0;
                   2393:                                continue;
                   2394:                        }
                   2395:                }
                   2396:                (*ifp->if_input)(ifp, m);
                   2397:        }
                   2398:
                   2399:        CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
                   2400:        if (stdcnt)
                   2401:                CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
                   2402:        if (jumbocnt)
                   2403:                CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
                   2404: }
                   2405:
                   2406: void
                   2407: bge_txeof(sc)
                   2408:        struct bge_softc *sc;
                   2409: {
                   2410:        struct bge_tx_bd *cur_tx = NULL;
                   2411:        struct ifnet *ifp;
                   2412:        struct txdmamap_pool_entry *dma;
                   2413:        bus_addr_t offset, toff;
                   2414:        bus_size_t tlen;
                   2415:        int tosync;
                   2416:        struct mbuf *m;
                   2417:
                   2418:        ifp = &sc->ethercom.ec_if;
                   2419:
                   2420:        bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
                   2421:            offsetof(struct bge_ring_data, bge_status_block),
                   2422:            sizeof (struct bge_status_block),
                   2423:            BUS_DMASYNC_POSTREAD);
                   2424:
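                                 /*
                                  * Sync the part of the TX ring covering descriptors the NIC has
                                  * finished transmitting since our last pass, again in two pieces
                                  * if the consumer index has wrapped.
                                  */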
                   2425:        offset = offsetof(struct bge_ring_data, bge_tx_ring);
                   2426:        tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
                   2427:            sc->bge_tx_saved_considx;
                   2428:
                   2429:        toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
                   2430:
                   2431:        if (tosync < 0) {
                   2432:                tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
                   2433:                    sizeof (struct bge_tx_bd);
                   2434:                bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
                   2435:                    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
                   2436:                tosync = -tosync;
                   2437:        }
                   2438:
                   2439:        bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
                   2440:            offset, tosync * sizeof (struct bge_tx_bd),
                   2441:            BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
                   2442:
                   2443:        /*
                   2444:         * Go through our tx ring and free mbufs for those
                   2445:         * frames that have been sent.
                   2446:         */
                   2447:        while (sc->bge_tx_saved_considx !=
                   2448:            sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
                   2449:                u_int32_t               idx = 0;
                   2450:
                   2451:                idx = sc->bge_tx_saved_considx;
                   2452:                cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
                   2453:                if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
                   2454:                        ifp->if_opackets++;
                   2455:                m = sc->bge_cdata.bge_tx_chain[idx];
                   2456:                if (m != NULL) {
                   2457:                        sc->bge_cdata.bge_tx_chain[idx] = NULL;
                   2458:                        dma = sc->txdma[idx];
                   2459:                        bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
                   2460:                            dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
                   2461:                        bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
                   2462:                        SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
                   2463:                        sc->txdma[idx] = NULL;
                   2464:
                   2465:                        m_freem(m);
                   2466:                }
                   2467:                sc->bge_txcnt--;
                   2468:                BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
                   2469:                ifp->if_timer = 0;
                   2470:        }
                   2471:
                   2472:        if (cur_tx != NULL)
                   2473:                ifp->if_flags &= ~IFF_OACTIVE;
                   2474: }
                   2475:
                   2476: int
                   2477: bge_intr(xsc)
                   2478:        void *xsc;
                   2479: {
                   2480:        struct bge_softc *sc;
                   2481:        struct ifnet *ifp;
                   2482:
                   2483:        sc = xsc;
                   2484:        ifp = &sc->ethercom.ec_if;
                   2485:
                   2486: #ifdef notdef
                   2487:        /* Avoid this for now -- checking this register is expensive. */
                   2488:        /* Make sure this is really our interrupt. */
                   2489:        if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
                   2490:                return (0);
                   2491: #endif
                   2492:        /* Ack interrupt and stop others from occurring. */
                   2493:        CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
                   2494:
                   2495:        /*
                   2496:         * Process link state changes.
                   2497:         * Grrr. The link status word in the status block does
                   2498:         * not work correctly on the BCM5700 rev AX and BX chips,
                   2499:         * according to all available information. Hence, we have
                   2500:         * to enable MII interrupts in order to properly obtain
                   2501:         * async link changes. Unfortunately, this also means that
                   2502:         * we have to read the MAC status register to detect link
                   2503:         * changes, thereby adding an additional register access to
                   2504:         * the interrupt handler.
                   2505:         */
                   2506:
                   2507:        if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
                   2508:                u_int32_t               status;
                   2509:
                   2510:                status = CSR_READ_4(sc, BGE_MAC_STS);
                   2511:                if (status & BGE_MACSTAT_MI_INTERRUPT) {
                   2512:                        sc->bge_link = 0;
                   2513:                        callout_stop(&sc->bge_timeout);
                   2514:                        bge_tick(sc);
                   2515:                        /* Clear the interrupt */
                   2516:                        CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
                   2517:                            BGE_EVTENB_MI_INTERRUPT);
                   2518:                        bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
                   2519:                        bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
                   2520:                            BRGPHY_INTRS);
                   2521:                }
                   2522:        } else {
                   2523:                if (sc->bge_rdata->bge_status_block.bge_status &
                   2524:                    BGE_STATFLAG_LINKSTATE_CHANGED) {
                   2525:                        sc->bge_link = 0;
                   2526:                        callout_stop(&sc->bge_timeout);
                   2527:                        bge_tick(sc);
                   2528:                        /* Clear the interrupt */
                   2529:                        CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
                   2530:                            BGE_MACSTAT_CFG_CHANGED);
                   2531:                }
                   2532:        }
                   2533:
                   2534:        if (ifp->if_flags & IFF_RUNNING) {
                   2535:                /* Check RX return ring producer/consumer */
                   2536:                bge_rxeof(sc);
                   2537:
                   2538:                /* Check TX ring producer/consumer */
                   2539:                bge_txeof(sc);
                   2540:        }
                   2541:
                   2542:        bge_handle_events(sc);
                   2543:
                   2544:        /* Re-enable interrupts. */
                   2545:        CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
                   2546:
                   2547:        if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
                   2548:                bge_start(ifp);
                   2549:
                   2550:        return (1);
                   2551: }
                   2552:
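                         /*
                          * One-second timer callout: update the statistics counters and,
                          * while the link is down, poll for link (TBI or MII as
                          * appropriate).  Once the link comes up, kick the transmit queue
                          * if packets are waiting to be sent.
                          */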
                   2553: void
                   2554: bge_tick(xsc)
                   2555:        void *xsc;
                   2556: {
                   2557:        struct bge_softc *sc = xsc;
                   2558:        struct mii_data *mii = &sc->bge_mii;
                   2559:        struct ifmedia *ifm = NULL;
                   2560:        struct ifnet *ifp = &sc->ethercom.ec_if;
                   2561:        int s;
                   2562:
                   2563:        s = splnet();
                   2564:
                   2565:        bge_stats_update(sc);
                   2566:        callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
                   2567:        if (sc->bge_link) {
                   2568:                splx(s);
                   2569:                return;
                   2570:        }
                   2571:
                   2572:        if (sc->bge_tbi) {
                   2573:                ifm = &sc->bge_ifmedia;
                   2574:                if (CSR_READ_4(sc, BGE_MAC_STS) &
                   2575:                    BGE_MACSTAT_TBI_PCS_SYNCHED) {
                   2576:                        sc->bge_link++;
                   2577:                        CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
                   2578:                        if (!IFQ_IS_EMPTY(&ifp->if_snd))
                   2579:                                bge_start(ifp);
                   2580:                }
                   2581:                splx(s);
                   2582:                return;
                   2583:        }
                   2584:
                   2585:        mii_tick(mii);
                   2586:
                   2587:        if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
                   2588:            IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
                   2589:                sc->bge_link++;
                   2590:                if (!IFQ_IS_EMPTY(&ifp->if_snd))
                   2591:                        bge_start(ifp);
                   2592:        }
                   2593:
                   2594:        splx(s);
                   2595: }
                   2596:
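                         /*
                          * Fold the chip's collision counters, read from the statistics
                          * block through the memory window, into the interface's
                          * collision count.
                          */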
                   2597: void
                   2598: bge_stats_update(sc)
                   2599:        struct bge_softc *sc;
                   2600: {
                   2601:        struct ifnet *ifp = &sc->ethercom.ec_if;
                   2602:        bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
                   2603:
                   2604: #define READ_STAT(sc, stats, stat) \
                   2605:          CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
                   2606:
                   2607:        ifp->if_collisions +=
                   2608:          (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
                   2609:           READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
                   2610:           READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
                   2611:           READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
                   2612:          ifp->if_collisions;
                   2613:
                   2614: #undef READ_STAT
                   2615:
                   2616: #ifdef notdef
                   2617:        ifp->if_collisions +=
                   2618:           (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
                   2619:           sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
                   2620:           sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
                   2621:           sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
                   2622:           ifp->if_collisions;
                   2623: #endif
                   2624: }
                   2625:
                   2626: /*
                   2627:  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
                   2628:  * pointers to descriptors.
                   2629:  */
                   2630: int
                   2631: bge_encap(sc, m_head, txidx)
                   2632:        struct bge_softc *sc;
                   2633:        struct mbuf *m_head;
                   2634:        u_int32_t *txidx;
                   2635: {
                   2636:        struct bge_tx_bd        *f = NULL;
                   2637:        u_int32_t               frag, cur, cnt = 0;
                   2638:        u_int16_t               csum_flags = 0;
                   2639:        struct txdmamap_pool_entry *dma;
                   2640:        bus_dmamap_t dmamap;
                   2641:        int                     i = 0;
                   2642:        struct mbuf             *n;
                   2643:        struct mbuf             *prev, *m;
                   2644:        int                     totlen, prevlen;
                   2645:
                   2646:        cur = frag = *txidx;
                   2647:
                   2648:        if (m_head->m_pkthdr.csum_flags) {
                   2649:                if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
                   2650:                        csum_flags |= BGE_TXBDFLAG_IP_CSUM;
                   2651:                if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
                   2652:                        csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
                   2653:        }
                   2654:
                   2655:        if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
                   2656:                goto doit;
                   2657:        /*
                   2658:         * bcm5700 Revision B silicon cannot handle DMA descriptors with
                   2659:         * less than eight bytes.  If we encounter a teeny mbuf
                   2660:         * at the end of a chain, we can pad.  Otherwise, copy.
                   2661:         */
                   2662:        prev = NULL;
                   2663:        totlen = 0;
                   2664:        for (m = m_head; m != NULL; prev = m, m = m->m_next) {
                   2665:                int mlen = m->m_len;
                   2666:
                   2667:                totlen += mlen;
                   2668:                if (mlen == 0) {
                   2669:                        /* print a warning? */
                   2670:                        continue;
                   2671:                }
                   2672:                if (mlen >= 8)
                   2673:                        continue;
                   2674:
                   2675:                /* If we get here, mbuf data is too small for DMA engine. */
                   2676:                if (m->m_next != 0) {
                   2677:                          /* Internal frag. If fits in prev, copy it there. */
                   2678:                          if (prev && M_TRAILINGSPACE(prev) >= m->m_len &&
                   2679:                              !M_READONLY(prev)) {
                   2680:                                bcopy(m->m_data,
                   2681:                                      prev->m_data+prev->m_len,
                   2682:                                      mlen);
                   2683:                                prev->m_len += mlen;
                   2684:                                m->m_len = 0;
                   2685:                                MFREE(m, prev->m_next); /* XXX stitch chain */
                   2686:                                m = prev;
                   2687:                                continue;
                   2688:                          } else {
                   2689:                                struct mbuf *n;
                   2690:                                /* slow copy */
                   2691: slowcopy:
                   2692:                                n = m_dup(m_head, 0, M_COPYALL, M_DONTWAIT);
                   2693:                                m_freem(m_head);
                   2694:                                if (n == 0)
                   2695:                                        return 0;
                   2696:                                m_head  = n;
                   2697:                                goto doit;
                   2698:                          }
                   2699:                } else if ((totlen - mlen + 8) >= 1500) {
                   2700:                        goto slowcopy;
                   2701:                }
                   2702:                prevlen = m->m_len;
                   2703:        }
                   2704:
                   2705: doit:
                   2706:        dma = SLIST_FIRST(&sc->txdma_list);
                   2707:        if (dma == NULL)
                   2708:                return ENOBUFS;
                   2709:        dmamap = dma->dmamap;
                   2710:
                   2711:        /*
                   2712:         * Start packing the mbufs in this chain into
                   2713:         * the fragment pointers. Stop when we run out
                   2714:         * of fragments or hit the end of the mbuf chain.
                   2715:         */
                   2716:        if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
                   2717:            BUS_DMA_NOWAIT))
                   2718:                return(ENOBUFS);
                   2719:
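                                 /* If VLANs are configured, see whether this packet carries a tag. */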
                   2720:        n = sc->ethercom.ec_nvlans ?
                   2721:            m_aux_find(m_head, AF_LINK, ETHERTYPE_VLAN) : NULL;
                   2722:
                   2723:        for (i = 0; i < dmamap->dm_nsegs; i++) {
                   2724:                f = &sc->bge_rdata->bge_tx_ring[frag];
                   2725:                if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
                   2726:                        break;
                   2727:                bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
                   2728:                f->bge_len = dmamap->dm_segs[i].ds_len;
                   2729:                f->bge_flags = csum_flags;
                   2730:
                   2731:                if (n != NULL) {
                   2732:                        f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
                   2733:                        f->bge_vlan_tag = *mtod(n, int *);
                   2734:                } else {
                   2735:                        f->bge_vlan_tag = 0;
                   2736:                }
                   2737:                /*
                   2738:                 * Sanity check: avoid coming within 16 descriptors
                   2739:                 * of the end of the ring.
                   2740:                 */
                   2741:                if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
                   2742:                        return(ENOBUFS);
                   2743:                cur = frag;
                   2744:                BGE_INC(frag, BGE_TX_RING_CNT);
                   2745:                cnt++;
                   2746:        }
                   2747:
                   2748:        if (i < dmamap->dm_nsegs)
                   2749:                return ENOBUFS;
                   2750:
                   2751:        bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
                   2752:            BUS_DMASYNC_PREWRITE);
                   2753:
                   2754:        if (frag == sc->bge_tx_saved_considx)
                   2755:                return(ENOBUFS);
                   2756:
                   2757:        sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
                   2758:        sc->bge_cdata.bge_tx_chain[cur] = m_head;
                   2759:        SLIST_REMOVE_HEAD(&sc->txdma_list, link);
                   2760:        sc->txdma[cur] = dma;
                   2761:        sc->bge_txcnt += cnt;
                   2762:
                   2763:        *txidx = frag;
                   2764:
                   2765:        return(0);
                   2766: }
                   2767:
                   2768: /*
                   2769:  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
                   2770:  * to the mbuf data regions directly in the transmit descriptors.
                   2771:  */
                   2772: void
                   2773: bge_start(ifp)
                   2774:        struct ifnet *ifp;
                   2775: {
                   2776:        struct bge_softc *sc;
                   2777:        struct mbuf *m_head = NULL;
                   2778:        u_int32_t prodidx = 0;
                   2779:        int pkts = 0;
                   2780:
                   2781:        sc = ifp->if_softc;
                   2782:
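                                 /*
                                  * Hold off transmitting while the link is down, unless the
                                  * send queue is starting to back up.
                                  */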
                   2783:        if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
                   2784:                return;
                   2785:
                   2786:        prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
                   2787:
                   2788:        while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
                   2789:                IFQ_POLL(&ifp->if_snd, m_head);
                   2790:                if (m_head == NULL)
                   2791:                        break;
                   2792:
                   2793: #if 0
                   2794:                /*
                   2795:                 * XXX
                   2796:                 * safety overkill.  If this is a fragmented packet chain
                   2797:                 * with delayed TCP/UDP checksums, then only encapsulate
                   2798:                 * it if we have enough descriptors to handle the entire
                   2799:                 * chain at once.
                   2800:                 * (paranoia -- may not actually be needed)
                   2801:                 */
                   2802:                if (m_head->m_flags & M_FIRSTFRAG &&
                   2803:                    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
                   2804:                        if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
                   2805:                            m_head->m_pkthdr.csum_data + 16) {
                   2806:                                ifp->if_flags |= IFF_OACTIVE;
                   2807:                                break;
                   2808:                        }
                   2809:                }
                   2810: #endif
                   2811:
                   2812:                /*
                   2813:                 * Pack the data into the transmit ring. If we
                   2814:                 * don't have room, set the OACTIVE flag and wait
                   2815:                 * for the NIC to drain the ring.
                   2816:                 */
                   2817:                if (bge_encap(sc, m_head, &prodidx)) {
                   2818:                        ifp->if_flags |= IFF_OACTIVE;
                   2819:                        break;
                   2820:                }
                   2821:
                   2822:                /* now we are committed to transmit the packet */
                   2823:                IFQ_DEQUEUE(&ifp->if_snd, m_head);
                   2824:                pkts++;
                   2825:
                   2826: #if NBPFILTER > 0
                   2827:                /*
                   2828:                 * If there's a BPF listener, bounce a copy of this frame
                   2829:                 * to him.
                   2830:                 */
                   2831:                if (ifp->if_bpf)
                   2832:                        bpf_mtap(ifp->if_bpf, m_head);
                   2833: #endif
                   2834:        }
                   2835:        if (pkts == 0)
                   2836:                return;
                   2837:
                   2838:        /* Transmit */
                   2839:        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
                   2840:        if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)    /* 5700 b2 errata */
                   2841:                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
                   2842:
                   2843:        /*
                   2844:         * Set a timeout in case the chip goes out to lunch.
                   2845:         */
                   2846:        ifp->if_timer = 5;
                   2847: }
                   2848:
                   2849: int
                   2850: bge_init(ifp)
                   2851:        struct ifnet *ifp;
                   2852: {
                   2853:        struct bge_softc *sc = ifp->if_softc;
                   2854:        u_int16_t *m;
                   2855:        int s, error;
                   2856:
                   2857:        s = splnet();
                   2858:
                   2859:        ifp = &sc->ethercom.ec_if;
                   2860:
                   2861:        /* Cancel pending I/O and flush buffers. */
                   2862:        bge_stop(sc);
                   2863:        bge_reset(sc);
                   2864:        bge_chipinit(sc);
                   2865:
                   2866:        /*
                   2867:         * Init the various state machines, ring
                   2868:         * control blocks and firmware.
                   2869:         */
                   2870:        error = bge_blockinit(sc);
                   2871:        if (error != 0) {
                   2872:                printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
                   2873:                    error);
                   2874:                splx(s);
                   2875:                return error;
                   2876:        }
                   2877:
                   2878:        ifp = &sc->ethercom.ec_if;
                   2879:
                   2880:        /* Specify MTU. */
                   2881:        CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
                   2882:            ETHER_HDR_LEN + ETHER_CRC_LEN);
                   2883:
                   2884:        /* Load our MAC address. */
                   2885:        m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
                   2886:        CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
                   2887:        CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
                   2888:
                   2889:        /* Enable or disable promiscuous mode as needed. */
                   2890:        if (ifp->if_flags & IFF_PROMISC) {
                   2891:                BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
                   2892:        } else {
                   2893:                BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
                   2894:        }
                   2895:
                   2896:        /* Program multicast filter. */
                   2897:        bge_setmulti(sc);
                   2898:
                   2899:        /* Init RX ring. */
                   2900:        bge_init_rx_ring_std(sc);
                   2901:
                   2902:        /* Init jumbo RX ring. */
                   2903:        if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
                   2904:                bge_init_rx_ring_jumbo(sc);
                   2905:
                   2906:        /* Init our RX return ring index */
                   2907:        sc->bge_rx_saved_considx = 0;
                   2908:
                   2909:        /* Init TX ring. */
                   2910:        bge_init_tx_ring(sc);
                   2911:
                   2912:        /* Turn on transmitter */
                   2913:        BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
                   2914:
                   2915:        /* Turn on receiver */
                   2916:        BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
                   2917:
                   2918:        /* Tell firmware we're alive. */
                   2919:        BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
                   2920:
                   2921:        /* Enable host interrupts. */
                   2922:        BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
                   2923:        BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
                   2924:        CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
                   2925:
                   2926:        bge_ifmedia_upd(ifp);
                   2927:
                   2928:        ifp->if_flags |= IFF_RUNNING;
                   2929:        ifp->if_flags &= ~IFF_OACTIVE;
                   2930:
                   2931:        splx(s);
                   2932:
                   2933:        callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
                   2934:
                   2935:        return 0;
                   2936: }
                   2937:
                   2938: /*
                   2939:  * Set media options.
                   2940:  */
                   2941: int
                   2942: bge_ifmedia_upd(ifp)
                   2943:        struct ifnet *ifp;
                   2944: {
                   2945:        struct bge_softc *sc = ifp->if_softc;
                   2946:        struct mii_data *mii = &sc->bge_mii;
                   2947:        struct ifmedia *ifm = &sc->bge_ifmedia;
                   2948:
                   2949:        /* If this is a 1000baseX NIC, enable the TBI port. */
                   2950:        if (sc->bge_tbi) {
                   2951:                if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                   2952:                        return(EINVAL);
                   2953:                switch(IFM_SUBTYPE(ifm->ifm_media)) {
                   2954:                case IFM_AUTO:
                   2955:                        break;
                   2956:                case IFM_1000_SX:
                   2957:                        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
                   2958:                                BGE_CLRBIT(sc, BGE_MAC_MODE,
                   2959:                                    BGE_MACMODE_HALF_DUPLEX);
                   2960:                        } else {
                   2961:                                BGE_SETBIT(sc, BGE_MAC_MODE,
                   2962:                                    BGE_MACMODE_HALF_DUPLEX);
                   2963:                        }
                   2964:                        break;
                   2965:                default:
                   2966:                        return(EINVAL);
                   2967:                }
                   2968:                return(0);
                   2969:        }
                   2970:
                   2971:        sc->bge_link = 0;
                   2972:        mii_mediachg(mii);
                   2973:
                   2974:        return(0);
                   2975: }
                   2976:
                   2977: /*
                   2978:  * Report current media status.
                   2979:  */
                   2980: void
                   2981: bge_ifmedia_sts(ifp, ifmr)
                   2982:        struct ifnet *ifp;
                   2983:        struct ifmediareq *ifmr;
                   2984: {
                   2985:        struct bge_softc *sc = ifp->if_softc;
                   2986:        struct mii_data *mii = &sc->bge_mii;
                   2987:
                   2988:        if (sc->bge_tbi) {
                   2989:                ifmr->ifm_status = IFM_AVALID;
                   2990:                ifmr->ifm_active = IFM_ETHER;
                   2991:                if (CSR_READ_4(sc, BGE_MAC_STS) &
                   2992:                    BGE_MACSTAT_TBI_PCS_SYNCHED)
                   2993:                        ifmr->ifm_status |= IFM_ACTIVE;
                   2994:                ifmr->ifm_active |= IFM_1000_SX;
                   2995:                if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
                   2996:                        ifmr->ifm_active |= IFM_HDX;
                   2997:                else
                   2998:                        ifmr->ifm_active |= IFM_FDX;
                   2999:                return;
                   3000:        }
                   3001:
                   3002:        mii_pollstat(mii);
                   3003:        ifmr->ifm_active = mii->mii_media_active;
                   3004:        ifmr->ifm_status = mii->mii_media_status;
                   3005: }
                   3006:
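                          /*
                           * Handle interface ioctls: flag changes (toggling promiscuous mode
                           * in place where possible instead of a full reinit), media requests,
                           * and multicast filter updates via ether_ioctl()/ENETRESET.
                           */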
                   3007: int
                   3008: bge_ioctl(ifp, command, data)
                   3009:        struct ifnet *ifp;
                   3010:        u_long command;
                   3011:        caddr_t data;
                   3012: {
                   3013:        struct bge_softc *sc = ifp->if_softc;
                   3014:        struct ifreq *ifr = (struct ifreq *) data;
                   3015:        int s, error = 0;
                   3016:        struct mii_data *mii;
                   3017:
                   3018:        s = splnet();
                   3019:
                   3020:        switch(command) {
                   3021:        case SIOCSIFFLAGS:
                   3022:                if (ifp->if_flags & IFF_UP) {
                   3023:                        /*
                   3024:                         * If only the state of the PROMISC flag changed,
                   3025:                         * then just use the 'set promisc mode' command
                   3026:                         * instead of reinitializing the entire NIC. Doing
                   3027:                         * a full re-init means reloading the firmware and
                   3028:                         * waiting for it to start up, which may take a
                   3029:                         * second or two.
                   3030:                         */
                   3031:                        if (ifp->if_flags & IFF_RUNNING &&
                   3032:                            ifp->if_flags & IFF_PROMISC &&
                   3033:                            !(sc->bge_if_flags & IFF_PROMISC)) {
                   3034:                                BGE_SETBIT(sc, BGE_RX_MODE,
                   3035:                                    BGE_RXMODE_RX_PROMISC);
                   3036:                        } else if (ifp->if_flags & IFF_RUNNING &&
                   3037:                            !(ifp->if_flags & IFF_PROMISC) &&
                   3038:                            sc->bge_if_flags & IFF_PROMISC) {
                   3039:                                BGE_CLRBIT(sc, BGE_RX_MODE,
                   3040:                                    BGE_RXMODE_RX_PROMISC);
                   3041:                        } else
                   3042:                                bge_init(ifp);
                   3043:                } else {
                   3044:                        if (ifp->if_flags & IFF_RUNNING) {
                   3045:                                bge_stop(sc);
                   3046:                        }
                   3047:                }
                   3048:                sc->bge_if_flags = ifp->if_flags;
                   3049:                error = 0;
                   3050:                break;
                   3051:        case SIOCSIFMEDIA:
                   3052:        case SIOCGIFMEDIA:
                   3053:                if (sc->bge_tbi) {
                   3054:                        error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
                   3055:                            command);
                   3056:                } else {
                   3057:                        mii = &sc->bge_mii;
                   3058:                        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
                   3059:                            command);
                   3060:                }
                   3062:                break;
                   3063:        default:
                   3064:                error = ether_ioctl(ifp, command, data);
                   3065:                if (error == ENETRESET) {
                   3066:                        bge_setmulti(sc);
                   3067:                        error = 0;
                   3068:                }
                   3069:                break;
                   3070:        }
                   3071:
                   3072:        splx(s);
                   3073:
                   3074:        return(error);
                   3075: }
                   3076:
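                          /*
                           * The transmit watchdog fired: the chip failed to complete a send
                           * within the timeout, so report it, reinitialize the hardware and
                           * count an output error.
                           */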
                   3077: void
                   3078: bge_watchdog(ifp)
                   3079:        struct ifnet *ifp;
                   3080: {
                   3081:        struct bge_softc *sc;
                   3082:
                   3083:        sc = ifp->if_softc;
                   3084:
                   3085:        printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
                   3086:
                   3087:        ifp->if_flags &= ~IFF_RUNNING;
                   3088:        bge_init(ifp);
                   3089:
                   3090:        ifp->if_oerrors++;
                   3091: }
                   3092:
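                          /*
                           * Clear the enable bit in a block's mode control register and poll
                           * until the hardware acknowledges that the block has stopped, or
                           * the timeout expires.
                           */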
                   3093: static void
                   3094: bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
                   3095: {
                   3096:        int i;
                   3097:
                   3098:        BGE_CLRBIT(sc, reg, bit);
                   3099:
                   3100:        for (i = 0; i < BGE_TIMEOUT; i++) {
                   3101:                if ((CSR_READ_4(sc, reg) & bit) == 0)
                   3102:                        return;
                   3103:                delay(100);
                   3104:        }
                   3105:
                   3106:        printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
                   3107:            sc->bge_dev.dv_xname, (u_long) reg, bit);
                   3108: }
                   3109:
                   3110: /*
                   3111:  * Stop the adapter and free any mbufs allocated to the
                   3112:  * RX and TX lists.
                   3113:  */
                   3114: void
                   3115: bge_stop(sc)
                   3116:        struct bge_softc *sc;
                   3117: {
                   3118:        struct ifnet *ifp = &sc->ethercom.ec_if;
                   3119:
                   3120:        callout_stop(&sc->bge_timeout);
                   3121:
                   3122:        /*
                   3123:         * Disable all of the receiver blocks
                   3124:         */
                   3125:        bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
                   3126:        bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
                   3127:        bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
                   3128:        bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
                   3129:        bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
                   3130:        bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
                   3131:        bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
                   3132:
                   3133:        /*
                   3134:         * Disable all of the transmit blocks
                   3135:         */
                   3136:        bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
                   3137:        bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
                   3138:        bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
                   3139:        bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
                   3140:        bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
                   3141:        bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
                   3142:        bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
                   3143:
                   3144:        /*
                   3145:         * Shut down all of the memory managers and related
                   3146:         * state machines.
                   3147:         */
                   3148:        bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
                   3149:        bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
                   3150:        bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
                   3151:
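                          /* Pulse the FTQ (flow-through queue) reset: assert for all queues, then release. */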
                   3152:        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
                   3153:        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
                   3154:
                   3155:        bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
                   3156:        bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
                   3157:
                   3158:        /* Disable host interrupts. */
                   3159:        BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
                   3160:        CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
                   3161:
                   3162:        /*
                   3163:         * Tell firmware we're shutting down.
                   3164:         */
                   3165:        BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
                   3166:
                   3167:        /* Free the RX lists. */
                   3168:        bge_free_rx_ring_std(sc);
                   3169:
                   3170:        /* Free jumbo RX list. */
                   3171:        bge_free_rx_ring_jumbo(sc);
                   3172:
                   3173:        /* Free TX buffers. */
                   3174:        bge_free_tx_ring(sc);
                   3175:
                   3176:        /*
                   3177:         * Isolate/power down the PHY.
                   3178:         */
                   3179:        if (!sc->bge_tbi)
                   3180:                mii_down(&sc->bge_mii);
                   3181:
                   3182:        sc->bge_link = 0;
                   3183:
                   3184:        sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
                   3185:
                   3186:        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
                   3187: }
                   3188:
                   3189: /*
                   3190:  * Stop all chip I/O so that the kernel's probe routines don't
                   3191:  * get confused by errant DMAs when rebooting.
                   3192:  */
                   3193: void
                   3194: bge_shutdown(xsc)
                   3195:        void *xsc;
                   3196: {
                   3197:        struct bge_softc *sc = (struct bge_softc *)xsc;
                   3198:
                   3199:        bge_stop(sc);
                   3200:        bge_reset(sc);
                   3201: }
