Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/dev/pci/if_wm.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/dev/pci/if_wm.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.508 retrieving revision 1.508.4.36 diff -u -p -r1.508 -r1.508.4.36 --- src/sys/dev/pci/if_wm.c 2017/04/13 10:37:36 1.508 +++ src/sys/dev/pci/if_wm.c 2019/11/06 10:23:06 1.508.4.36 @@ -1,4 +1,4 @@ -/* $NetBSD: if_wm.c,v 1.508 2017/04/13 10:37:36 knakahara Exp $ */ +/* $NetBSD: if_wm.c,v 1.508.4.36 2019/11/06 10:23:06 martin Exp $ */ /* * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. @@ -73,18 +73,16 @@ * TODO (in order of importance): * * - Check XXX'ed comments - * - Disable D0 LPLU on 8257[12356], 82580 and I350. * - TX Multi queue improvement (refine queue selection logic) * - Split header buffer for newer descriptors * - EEE (Energy Efficiency Ethernet) * - Virtual Function * - Set LED correctly (based on contents in EEPROM) * - Rework how parameters are loaded from the EEPROM. - * - Image Unique ID */ #include -__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.508 2017/04/13 10:37:36 knakahara Exp $"); +__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.508.4.36 2019/11/06 10:23:06 martin Exp $"); #ifdef _KERNEL_OPT #include "opt_net_mpsafe.h" @@ -117,6 +115,8 @@ __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1. #include +#include + #include /* XXX for struct ip */ #include /* XXX for struct ip */ #include /* XXX for struct ip */ @@ -135,6 +135,7 @@ __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1. #include #include #include +#include #include #include @@ -155,9 +156,9 @@ __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1. 
int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK; -#define DPRINTF(x, y) if (wm_debug & (x)) printf y +#define DPRINTF(x, y) do { if (wm_debug & (x)) printf y; } while (0) #else -#define DPRINTF(x, y) /* nothing */ +#define DPRINTF(x, y) __nothing #endif /* WM_DEBUG */ #ifdef NET_MPSAFE @@ -183,18 +184,24 @@ int wm_debug = WM_DEBUG_TX | WM_DEBUG_RX int wm_disable_msi = WM_DISABLE_MSI; int wm_disable_msix = WM_DISABLE_MSIX; +#ifndef WM_WATCHDOG_TIMEOUT +#define WM_WATCHDOG_TIMEOUT 5 +#endif +static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT; + /* * Transmit descriptor list size. Due to errata, we can only have * 256 hardware descriptors in the ring on < 82544, but we use 4096 - * on >= 82544. We tell the upper layers that they can queue a lot + * on >= 82544. We tell the upper layers that they can queue a lot * of packets, and we go ahead and manage up to 64 (16 for the i82547) * of them at a time. * - * We allow up to 256 (!) DMA segments per packet. Pathological packet + * We allow up to 64 DMA segments per packet. Pathological packet * chains containing many small mbufs have been observed in zero-copy - * situations with jumbo frames. + * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments, + * m_defrag() is called to reduce it. */ -#define WM_NTXSEGS 256 +#define WM_NTXSEGS 64 #define WM_IFQUEUELEN 256 #define WM_TXQUEUELEN_MAX 64 #define WM_TXQUEUELEN_MAX_82547 16 @@ -213,13 +220,20 @@ int wm_disable_msix = WM_DISABLE_MSIX; #define WM_TXINTERQSIZE 256 +#ifndef WM_TX_PROCESS_LIMIT_DEFAULT +#define WM_TX_PROCESS_LIMIT_DEFAULT 100U +#endif +#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT +#define WM_TX_INTR_PROCESS_LIMIT_DEFAULT 0U +#endif + /* * Receive descriptor list size. We have one Rx buffer for normal * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized * packet. 
We allocate 256 receive descriptors, each with a 2k * buffer (MCLBYTES), which gives us room for 50 jumbo packets. */ -#define WM_NRXDESC 256 +#define WM_NRXDESC 256U #define WM_NRXDESC_MASK (WM_NRXDESC - 1) #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) @@ -233,13 +247,13 @@ int wm_disable_msix = WM_DISABLE_MSIX; typedef union txdescs { wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544]; - nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; + nq_txdesc_t sctxu_nq_txdescs[WM_NTXDESC_82544]; } txdescs_t; typedef union rxdescs { wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC]; - ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */ - nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */ + ext_rxdesc_t sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */ + nq_rxdesc_t sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */ } rxdescs_t; #define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x)) @@ -257,9 +271,9 @@ struct wm_txsoft { }; /* - * Software state for receive buffers. Each descriptor gets a - * 2k (MCLBYTES) buffer and a DMA map. For packets which fill - * more than one buffer, we chain them together. + * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES) + * buffer and a DMA map. For packets which fill more than one buffer, we chain + * them together. 
*/ struct wm_rxsoft { struct mbuf *rxs_mbuf; /* head of our mbuf chain */ @@ -287,14 +301,14 @@ struct wm_softc; struct evcnt qname##_ev_##evname; #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype) \ - do{ \ + do { \ snprintf((q)->qname##_##evname##_evcnt_name, \ sizeof((q)->qname##_##evname##_evcnt_name), \ "%s%02d%s", #qname, (qnum), #evname); \ evcnt_attach_dynamic(&(q)->qname##_ev_##evname, \ (evtype), NULL, (xname), \ (q)->qname##_##evname##_evcnt_name); \ - }while(0) + } while (0) #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname) \ WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC) @@ -319,7 +333,7 @@ struct wm_txqueue { int txq_ndesc; /* must be a power of two */ size_t txq_descsize; /* a tx descriptor size */ txdescs_t *txq_descs_u; - bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ + bus_dmamap_t txq_desc_dmamap; /* control data DMA map */ bus_dma_segment_t txq_desc_seg; /* control data segment */ int txq_desc_rseg; /* real number of control segment */ #define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr @@ -356,26 +370,33 @@ struct wm_txqueue { bool txq_stopping; + bool txq_sending; + time_t txq_lastsent; + uint32_t txq_packets; /* for AIM */ uint32_t txq_bytes; /* for AIM */ #ifdef WM_EVENT_COUNTERS - WM_Q_EVCNT_DEFINE(txq, txsstall) /* Tx stalled due to no txs */ - WM_Q_EVCNT_DEFINE(txq, txdstall) /* Tx stalled due to no txd */ - WM_Q_EVCNT_DEFINE(txq, txfifo_stall) /* Tx FIFO stalls (82547) */ - WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */ - WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */ - /* XXX not used? */ - - WM_Q_EVCNT_DEFINE(txq, txipsum) /* IP checksums comp. out-bound */ - WM_Q_EVCNT_DEFINE(txq,txtusum) /* TCP/UDP cksums comp. out-bound */ - WM_Q_EVCNT_DEFINE(txq, txtusum6) /* TCP/UDP v6 cksums comp. 
out-bound */ - WM_Q_EVCNT_DEFINE(txq, txtso) /* TCP seg offload out-bound (IPv4) */ - WM_Q_EVCNT_DEFINE(txq, txtso6) /* TCP seg offload out-bound (IPv6) */ - WM_Q_EVCNT_DEFINE(txq, txtsopain) /* painful header manip. for TSO */ - - WM_Q_EVCNT_DEFINE(txq, txdrop) /* Tx packets dropped(too many segs) */ - - WM_Q_EVCNT_DEFINE(txq, tu) /* Tx underrun */ + /* TX event counters */ + WM_Q_EVCNT_DEFINE(txq, txsstall) /* Stalled due to no txs */ + WM_Q_EVCNT_DEFINE(txq, txdstall) /* Stalled due to no txd */ + WM_Q_EVCNT_DEFINE(txq, fifo_stall) /* FIFO stalls (82547) */ + WM_Q_EVCNT_DEFINE(txq, txdw) /* Tx descriptor interrupts */ + WM_Q_EVCNT_DEFINE(txq, txqe) /* Tx queue empty interrupts */ + /* XXX not used? */ + + WM_Q_EVCNT_DEFINE(txq, ipsum) /* IP checksums comp. */ + WM_Q_EVCNT_DEFINE(txq, tusum) /* TCP/UDP cksums comp. */ + WM_Q_EVCNT_DEFINE(txq, tusum6) /* TCP/UDP v6 cksums comp. */ + WM_Q_EVCNT_DEFINE(txq, tso) /* TCP seg offload (IPv4) */ + WM_Q_EVCNT_DEFINE(txq, tso6) /* TCP seg offload (IPv6) */ + WM_Q_EVCNT_DEFINE(txq, tsopain) /* Painful header manip. 
for TSO */ + WM_Q_EVCNT_DEFINE(txq, pcqdrop) /* Pkt dropped in pcq */ + WM_Q_EVCNT_DEFINE(txq, descdrop) /* Pkt dropped in MAC desc ring */ + /* other than toomanyseg */ + + WM_Q_EVCNT_DEFINE(txq, toomanyseg) /* Pkt dropped(toomany DMA segs) */ + WM_Q_EVCNT_DEFINE(txq, defrag) /* m_defrag() */ + WM_Q_EVCNT_DEFINE(txq, underrun) /* Tx underrun */ char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")]; struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ @@ -416,15 +437,17 @@ struct wm_rxqueue { uint32_t rxq_packets; /* for AIM */ uint32_t rxq_bytes; /* for AIM */ #ifdef WM_EVENT_COUNTERS - WM_Q_EVCNT_DEFINE(rxq, rxintr); /* Rx interrupts */ + /* RX event counters */ + WM_Q_EVCNT_DEFINE(rxq, intr); /* Interrupts */ + WM_Q_EVCNT_DEFINE(rxq, defer); /* Rx deferred processing */ - WM_Q_EVCNT_DEFINE(rxq, rxipsum); /* IP checksums checked in-bound */ - WM_Q_EVCNT_DEFINE(rxq, rxtusum); /* TCP/UDP cksums checked in-bound */ + WM_Q_EVCNT_DEFINE(rxq, ipsum); /* IP checksums checked */ + WM_Q_EVCNT_DEFINE(rxq, tusum); /* TCP/UDP cksums checked */ #endif }; struct wm_queue { - int wmq_id; /* index of transmit and receive queues */ + int wmq_id; /* index of TX/RX queues */ int wmq_intr_idx; /* index of MSI-X tables */ uint32_t wmq_itr; /* interrupt interval per queue. */ @@ -434,14 +457,23 @@ struct wm_queue { struct wm_rxqueue wmq_rxq; void *wmq_si; + krndsource_t rnd_source; /* random source */ }; struct wm_phyop { int (*acquire)(struct wm_softc *); void (*release)(struct wm_softc *); + int (*readreg_locked)(device_t, int, int, uint16_t *); + int (*writereg_locked)(device_t, int, int, uint16_t); int reset_delay_us; }; +struct wm_nvmop { + int (*acquire)(struct wm_softc *); + void (*release)(struct wm_softc *); + int (*read)(struct wm_softc *, int, int, uint16_t *); +}; + /* * Software state per device. 
*/ @@ -512,8 +544,10 @@ struct wm_softc { int sc_nqueues; struct wm_queue *sc_queue; - u_int sc_rx_process_limit; /* Rx processing repeat limit in softint */ - u_int sc_rx_intr_process_limit; /* Rx processing repeat limit in H/W intr */ + u_int sc_tx_process_limit; /* Tx proc. repeat limit in softint */ + u_int sc_tx_intr_process_limit; /* Tx proc. repeat limit in H/W intr */ + u_int sc_rx_process_limit; /* Rx proc. repeat limit in softint */ + u_int sc_rx_intr_process_limit; /* Rx proc. repeat limit in H/W intr */ int sc_affinity_offset; @@ -521,7 +555,7 @@ struct wm_softc { /* Event counters. */ struct evcnt sc_ev_linkintr; /* Link interrupts */ - /* WM_T_82542_2_1 only */ + /* WM_T_82542_2_1 only */ struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ @@ -551,8 +585,6 @@ struct wm_softc { int sc_mchash_type; /* multicast filter offset */ - krndsource_t rnd_source; /* random source */ - struct if_percpuq *sc_ipq; /* softint-based input queues */ kmutex_t *sc_core_lock; /* lock for softc operations */ @@ -564,11 +596,15 @@ struct wm_softc { kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */ struct wm_phyop phy; + struct wm_nvmop nvm; }; -#define WM_CORE_LOCK(_sc) if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock) -#define WM_CORE_UNLOCK(_sc) if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock) -#define WM_CORE_LOCKED(_sc) (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock)) +#define WM_CORE_LOCK(_sc) \ + if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock) +#define WM_CORE_UNLOCK(_sc) \ + if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock) +#define WM_CORE_LOCKED(_sc) \ + (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock)) #define WM_RXCHAIN_RESET(rxq) \ do { \ @@ -604,7 +640,7 @@ do { \ #define CSR_WRITE(sc, reg, val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) #define CSR_WRITE_FLUSH(sc) \ - (void) 
CSR_READ((sc), WMREG_STATUS) + (void)CSR_READ((sc), WMREG_STATUS) #define ICH8_FLASH_READ32(sc, reg) \ bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \ @@ -642,7 +678,7 @@ static inline uint32_t wm_io_read(struct #endif static inline void wm_io_write(struct wm_softc *, int, uint32_t); static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, - uint32_t, uint32_t); + uint32_t, uint32_t); static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); /* @@ -663,7 +699,10 @@ static int wm_detach(device_t, int); static bool wm_suspend(device_t, const pmf_qual_t *); static bool wm_resume(device_t, const pmf_qual_t *); static void wm_watchdog(struct ifnet *); -static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *); +static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *, + uint16_t *); +static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *, + uint16_t *); static void wm_tick(void *); static int wm_ifflags_cb(struct ethercom *); static int wm_ioctl(struct ifnet *, u_long, void *); @@ -672,6 +711,7 @@ static uint16_t wm_check_alt_mac_addr(st static int wm_read_mac_addr(struct wm_softc *, uint8_t *); static void wm_set_ral(struct wm_softc *, const uint8_t *, int); static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); +static int wm_rar_count(struct wm_softc *); static void wm_set_filter(struct wm_softc *); /* Reset and init related */ static void wm_set_vlan(struct wm_softc *); @@ -679,14 +719,17 @@ static void wm_set_pcie_completion_timeo static void wm_get_auto_rd_done(struct wm_softc *); static void wm_lan_init_done(struct wm_softc *); static void wm_get_cfg_done(struct wm_softc *); +static void wm_phy_post_reset(struct wm_softc *); +static int wm_write_smbus_addr(struct wm_softc *); +static void wm_init_lcd_from_nvm(struct wm_softc *); +static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool); static void wm_initialize_hardware_bits(struct wm_softc *); static uint32_t 
wm_rxpbs_adjust_82580(uint32_t); -static void wm_reset_phy(struct wm_softc *); +static int wm_reset_phy(struct wm_softc *); static void wm_flush_desc_rings(struct wm_softc *); static void wm_reset(struct wm_softc *); static int wm_add_rxbuf(struct wm_rxqueue *, int); static void wm_rxdrain(struct wm_rxqueue *); -static void wm_rss_getkey(uint8_t *); static void wm_init_rss(struct wm_softc *); static void wm_adjust_qnum(struct wm_softc *, int); static inline bool wm_is_using_msix(struct wm_softc *); @@ -696,8 +739,8 @@ static int wm_setup_legacy(struct wm_sof static int wm_setup_msix(struct wm_softc *); static int wm_init(struct ifnet *); static int wm_init_locked(struct ifnet *); -static void wm_turnon(struct wm_softc *); -static void wm_turnoff(struct wm_softc *); +static void wm_unset_stopping_flags(struct wm_softc *); +static void wm_set_stopping_flags(struct wm_softc *); static void wm_stop(struct ifnet *, int); static void wm_stop_locked(struct ifnet *, int); static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); @@ -735,19 +778,21 @@ static void wm_start(struct ifnet *); static void wm_start_locked(struct ifnet *); static int wm_transmit(struct ifnet *, struct mbuf *); static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *); -static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool); +static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, + bool); static int wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *, struct wm_txsoft *, uint32_t *, uint32_t *, bool *); static void wm_nq_start(struct ifnet *); static void wm_nq_start_locked(struct ifnet *); static int wm_nq_transmit(struct ifnet *, struct mbuf *); static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *); -static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool); +static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, + bool); static void wm_deferred_start_locked(struct wm_txqueue *); 
static void wm_handle_queue(void *); /* Interrupt */ -static int wm_txeof(struct wm_softc *, struct wm_txqueue *); -static void wm_rxeof(struct wm_rxqueue *, u_int); +static bool wm_txeof(struct wm_txqueue *, u_int); +static bool wm_rxeof(struct wm_rxqueue *, u_int); static void wm_linkintr_gmii(struct wm_softc *, uint32_t); static void wm_linkintr_tbi(struct wm_softc *, uint32_t); static void wm_linkintr_serdes(struct wm_softc *, uint32_t); @@ -767,7 +812,7 @@ static int wm_linkintr_msix(void *); static void wm_tbi_serdes_set_linkled(struct wm_softc *); /* GMII related */ static void wm_gmii_reset(struct wm_softc *); -static void wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t); +static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t); static int wm_get_phy_id_82575(struct wm_softc *); static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); static int wm_gmii_mediachange(struct ifnet *); @@ -779,16 +824,21 @@ static void wm_gmii_i82543_writereg(devi static int wm_gmii_mdic_readreg(device_t, int, int); static void wm_gmii_mdic_writereg(device_t, int, int, int); static int wm_gmii_i82544_readreg(device_t, int, int); +static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *); static void wm_gmii_i82544_writereg(device_t, int, int, int); +static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t); static int wm_gmii_i80003_readreg(device_t, int, int); static void wm_gmii_i80003_writereg(device_t, int, int, int); static int wm_gmii_bm_readreg(device_t, int, int); static void wm_gmii_bm_writereg(device_t, int, int, int); -static void wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int); +static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *); +static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *); +static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int, + bool); static int wm_gmii_hv_readreg(device_t, int, int); -static int 
wm_gmii_hv_readreg_locked(device_t, int, int); +static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *); static void wm_gmii_hv_writereg(device_t, int, int, int); -static void wm_gmii_hv_writereg_locked(device_t, int, int, int); +static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t); static int wm_gmii_82580_readreg(device_t, int, int); static void wm_gmii_82580_writereg(device_t, int, int, int); static int wm_gmii_gs40g_readreg(device_t, int, int); @@ -799,15 +849,16 @@ static void wm_gmii_statchg(struct ifnet * These functions are not for accessing MII registers but for accessing * kumeran specific registers. */ -static int wm_kmrn_readreg(struct wm_softc *, int); -static int wm_kmrn_readreg_locked(struct wm_softc *, int); -static void wm_kmrn_writereg(struct wm_softc *, int, int); -static void wm_kmrn_writereg_locked(struct wm_softc *, int, int); +static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *); +static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *); +static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t); +static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t); /* SGMII */ static bool wm_sgmii_uses_mdio(struct wm_softc *); static int wm_sgmii_readreg(device_t, int, int); static void wm_sgmii_writereg(device_t, int, int, int); /* TBI related */ +static bool wm_tbi_havesignal(struct wm_softc *, uint32_t); static void wm_tbi_mediainit(struct wm_softc *); static int wm_tbi_mediachange(struct ifnet *); static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); @@ -844,7 +895,7 @@ static int wm_nvm_valid_bank_detect_ich8 static int32_t wm_ich8_cycle_init(struct wm_softc *); static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t, - uint32_t *); + uint32_t *); static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *); static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t 
*); static int32_t wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *); @@ -854,10 +905,8 @@ static int wm_nvm_read_spt(struct wm_sof static int wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *); static int wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *); /* Lock, detecting NVM type, validate checksum and read */ -static int wm_nvm_acquire(struct wm_softc *); -static void wm_nvm_release(struct wm_softc *); static int wm_nvm_is_onboard_eeprom(struct wm_softc *); -static int wm_nvm_get_flash_presence_i210(struct wm_softc *); +static int wm_nvm_flash_presence_i210(struct wm_softc *); static int wm_nvm_validate_checksum(struct wm_softc *); static void wm_nvm_version_invm(struct wm_softc *); static void wm_nvm_version(struct wm_softc *); @@ -869,17 +918,23 @@ static int wm_nvm_read(struct wm_softc * */ static int wm_get_null(struct wm_softc *); static void wm_put_null(struct wm_softc *); +static int wm_get_eecd(struct wm_softc *); +static void wm_put_eecd(struct wm_softc *); static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */ static void wm_put_swsm_semaphore(struct wm_softc *); static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); +static int wm_get_nvm_80003(struct wm_softc *); +static void wm_put_nvm_80003(struct wm_softc *); +static int wm_get_nvm_82571(struct wm_softc *); +static void wm_put_nvm_82571(struct wm_softc *); static int wm_get_phy_82575(struct wm_softc *); static void wm_put_phy_82575(struct wm_softc *); static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */ static void wm_put_swfwhw_semaphore(struct wm_softc *); static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */ static void wm_put_swflag_ich8lan(struct wm_softc *); -static int wm_get_nvm_ich8lan(struct wm_softc *); /* For NVM */ +static int wm_get_nvm_ich8lan(struct wm_softc *); static void wm_put_nvm_ich8lan(struct wm_softc *); static int 
wm_get_hw_semaphore_82573(struct wm_softc *); static void wm_put_hw_semaphore_82573(struct wm_softc *); @@ -899,17 +954,19 @@ static bool wm_phy_resetisblocked(struct static void wm_get_hw_control(struct wm_softc *); static void wm_release_hw_control(struct wm_softc *); static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); -static void wm_smbustopci(struct wm_softc *); +static int wm_init_phy_workarounds_pchlan(struct wm_softc *); static void wm_init_manageability(struct wm_softc *); static void wm_release_manageability(struct wm_softc *); static void wm_get_wakeup(struct wm_softc *); -static void wm_ulp_disable(struct wm_softc *); -static void wm_enable_phy_wakeup(struct wm_softc *); +static int wm_ulp_disable(struct wm_softc *); +static int wm_enable_phy_wakeup(struct wm_softc *); static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); +static void wm_suspend_workarounds_ich8lan(struct wm_softc *); +static int wm_resume_workarounds_pchlan(struct wm_softc *); static void wm_enable_wakeup(struct wm_softc *); +static void wm_disable_aspm(struct wm_softc *); /* LPLU (Low Power Link Up) */ static void wm_lplu_d0_disable(struct wm_softc *); -static void wm_lplu_d0_disable_pch(struct wm_softc *); /* EEE */ static void wm_set_eee_i350(struct wm_softc *); @@ -919,9 +976,13 @@ static void wm_set_eee_i350(struct wm_so */ static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); -static void wm_hv_phy_workaround_ich8lan(struct wm_softc *); -static void wm_lv_phy_workaround_ich8lan(struct wm_softc *); +static void wm_hv_phy_workarounds_ich8lan(struct wm_softc *); +static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *); +static void wm_lv_phy_workarounds_ich8lan(struct wm_softc *); +static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool); static int wm_k1_gig_workaround_hv(struct wm_softc *, int); +static int wm_k1_workaround_lv(struct wm_softc *); +static 
int wm_link_stall_workaround_hv(struct wm_softc *); static void wm_set_mdio_slow_mode_hv(struct wm_softc *); static void wm_configure_k1_ich8lan(struct wm_softc *, int); static void wm_reset_init_script_82575(struct wm_softc *); @@ -930,6 +991,7 @@ static bool wm_phy_is_accessible_pchlan( static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); static void wm_pll_workaround_i210(struct wm_softc *); +static void wm_legacy_irq_quirk_spt(struct wm_softc *); CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); @@ -1404,7 +1466,7 @@ static const struct wm_product { WM_T_I210, WMP_F_COPPER }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF, - "I210 Ethernet (FLASH less)", + "I210 Ethernet (Copper, FLASH less)", WM_T_I210, WMP_F_COPPER }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER, @@ -1416,13 +1478,17 @@ static const struct wm_product { WM_T_I210, WMP_F_SERDES }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF, - "I210 Gigabit Ethernet (FLASH less)", + "I210 Gigabit Ethernet (SERDES, FLASH less)", WM_T_I210, WMP_F_SERDES }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII, "I210 Gigabit Ethernet (SGMII)", WM_T_I210, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII_WOF, + "I210 Gigabit Ethernet (SGMII, FLASH less)", + WM_T_I210, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER, "I211 Ethernet (COPPER)", WM_T_I211, WMP_F_COPPER }, @@ -1450,19 +1516,6 @@ static const struct wm_product { { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3, "I218 LM Ethernet Connection", WM_T_PCH_LPT, WMP_F_COPPER }, -#if 0 - { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V, - "I219 V Ethernet Connection", - WM_T_PCH_SPT, WMP_F_COPPER }, - { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2, - "I219 V Ethernet Connection", - WM_T_PCH_SPT, WMP_F_COPPER }, - { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4, - "I219 V Ethernet 
Connection", - WM_T_PCH_SPT, WMP_F_COPPER }, - { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5, - "I219 V Ethernet Connection", - WM_T_PCH_SPT, WMP_F_COPPER }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM, "I219 LM Ethernet Connection", WM_T_PCH_SPT, WMP_F_COPPER }, @@ -1478,7 +1531,42 @@ static const struct wm_product { { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5, "I219 LM Ethernet Connection", WM_T_PCH_SPT, WMP_F_COPPER }, -#endif + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6, + "I219 LM Ethernet Connection", + WM_T_PCH_CNP, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7, + "I219 LM Ethernet Connection", + WM_T_PCH_CNP, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8, + "I219 LM Ethernet Connection", + WM_T_PCH_CNP, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9, + "I219 LM Ethernet Connection", + WM_T_PCH_CNP, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V, + "I219 V Ethernet Connection", + WM_T_PCH_SPT, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2, + "I219 V Ethernet Connection", + WM_T_PCH_SPT, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4, + "I219 V Ethernet Connection", + WM_T_PCH_SPT, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5, + "I219 V Ethernet Connection", + WM_T_PCH_SPT, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6, + "I219 V Ethernet Connection", + WM_T_PCH_CNP, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7, + "I219 V Ethernet Connection", + WM_T_PCH_CNP, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8, + "I219 V Ethernet Connection", + WM_T_PCH_CNP, WMP_F_COPPER }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9, + "I219 V Ethernet Connection", + WM_T_PCH_CNP, WMP_F_COPPER }, { 0, 0, NULL, 0, 0 }, @@ -1597,13 +1685,13 @@ wm_init_rxdesc(struct wm_rxqueue *rxq, i if (sc->sc_type == WM_T_82574) { ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start]; rxd->erx_data.erxd_addr = - 
htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); + htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); rxd->erx_data.erxd_dd = 0; } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start]; rxd->nqrx_data.nrxd_paddr = - htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); + htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); /* Currently, split header is not supported. */ rxd->nqrx_data.nrxd_haddr = 0; } else { @@ -1674,6 +1762,7 @@ wm_attach(device_t parent, device_t self prop_data_t ea; prop_number_t pn; uint8_t enaddr[ETHER_ADDR_LEN]; + char buf[256]; uint16_t cfg1, cfg2, swdpin, nvmword; pcireg_t preg, memtype; uint16_t eeprom_data, apme_mask; @@ -1709,8 +1798,8 @@ wm_attach(device_t parent, device_t self sc->sc_type = wmp->wmp_type; /* Set default function pointers */ - sc->phy.acquire = wm_get_null; - sc->phy.release = wm_put_null; + sc->phy.acquire = sc->nvm.acquire = wm_get_null; + sc->phy.release = sc->nvm.release = wm_put_null; sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 
100 : 10000; if (sc->sc_type < WM_T_82543) { @@ -1726,7 +1815,7 @@ wm_attach(device_t parent, device_t self /* * Disable MSI for Errata: * "Message Signaled Interrupt Feature May Corrupt Write Transactions" - * + * * 82544: Errata 25 * 82540: Errata 6 (easy to reproduce device timeout) * 82545: Errata 4 (easy to reproduce device timeout) @@ -1760,7 +1849,7 @@ wm_attach(device_t parent, device_t self case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA, - memtype, 0, &memt, &memh, NULL, &memsize) == 0); + memtype, 0, &memt, &memh, NULL, &memsize) == 0); break; default: memh_valid = 0; @@ -1812,10 +1901,9 @@ wm_attach(device_t parent, device_t self 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios) == 0) { sc->sc_flags |= WM_F_IOH_VALID; - } else { + } else aprint_error_dev(sc->sc_dev, "WARNING: unable to map I/O space\n"); - } } } @@ -1827,18 +1915,33 @@ wm_attach(device_t parent, device_t self preg &= ~PCI_COMMAND_INVALIDATE_ENABLE; pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); - /* power up chip */ - if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, - NULL)) && error != EOPNOTSUPP) { + /* Power up chip */ + if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL)) + && error != EOPNOTSUPP) { aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error); return; } wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag)); + /* + * Don't use MSI-X if we can use only one queue to save interrupt + * resource. + */ + if (sc->sc_nqueues > 1) { + max_type = PCI_INTR_TYPE_MSIX; + /* + * 82583 has a MSI-X capability in the PCI configuration space + * but it doesn't support it. At least the document doesn't + * say anything about MSI-X. + */ + counts[PCI_INTR_TYPE_MSIX] + = (sc->sc_type == WM_T_82583) ? 
0 : sc->sc_nqueues + 1; + } else { + max_type = PCI_INTR_TYPE_MSI; + counts[PCI_INTR_TYPE_MSIX] = 0; + } /* Allocation settings */ - max_type = PCI_INTR_TYPE_MSIX; - counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1; counts[PCI_INTR_TYPE_MSI] = 1; counts[PCI_INTR_TYPE_INTX] = 1; /* overridden by disable flags */ @@ -1871,8 +1974,8 @@ alloc_retry: counts[PCI_INTR_TYPE_INTX] = 1; goto alloc_retry; } - } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) { - wm_adjust_qnum(sc, 0); /* must not use multiqueue */ + } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) { + wm_adjust_qnum(sc, 0); /* Must not use multiqueue */ error = wm_setup_legacy(sc); if (error) { pci_intr_release(sc->sc_pc, sc->sc_intrs, @@ -1884,7 +1987,7 @@ alloc_retry: goto alloc_retry; } } else { - wm_adjust_qnum(sc, 0); /* must not use multiqueue */ + wm_adjust_qnum(sc, 0); /* Must not use multiqueue */ error = wm_setup_legacy(sc); if (error) { pci_intr_release(sc->sc_pc, sc->sc_intrs, @@ -1897,7 +2000,7 @@ alloc_retry: * Check the function ID (unit number of the chip). 
*/ if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3) - || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003) + || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003) || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) @@ -1924,7 +2027,7 @@ alloc_retry: if (sc->sc_type == WM_T_82547) { callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS); callout_setfunc(&sc->sc_txfifo_ch, - wm_82547_txfifo_stall, sc); + wm_82547_txfifo_stall, sc); aprint_verbose_dev(sc->sc_dev, "using 82547 Tx FIFO stall work-around\n"); } @@ -1935,7 +2038,8 @@ alloc_retry: && (sc->sc_type != WM_T_PCH) && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT) - && (sc->sc_type != WM_T_PCH_SPT)) { + && (sc->sc_type != WM_T_PCH_SPT) + && (sc->sc_type != WM_T_PCH_CNP)) { /* ICH* and PCH* have no PCIe capability registers */ if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff, @@ -1978,7 +2082,7 @@ alloc_retry: 512 << bytecnt, 512 << maxb); pcix_cmd = (pcix_cmd & ~PCIX_CMD_BYTECNT_MASK) | - (maxb << PCIX_CMD_BYTECNT_SHIFT); + (maxb << PCIX_CMD_BYTECNT_SHIFT); pci_conf_write(pa->pa_pc, pa->pa_tag, sc->sc_pcixe_capoff + PCIX_CMD, pcix_cmd); @@ -2035,6 +2139,7 @@ alloc_retry: case WM_T_82543: case WM_T_82544: /* Microwire */ + sc->nvm.read = wm_nvm_read_uwire; sc->sc_nvm_wordsize = 64; sc->sc_nvm_addrbits = 6; break; @@ -2044,6 +2149,7 @@ alloc_retry: case WM_T_82546: case WM_T_82546_3: /* Microwire */ + sc->nvm.read = wm_nvm_read_uwire; reg = CSR_READ(sc, WMREG_EECD); if (reg & EECD_EE_SIZE) { sc->sc_nvm_wordsize = 256; @@ -2053,19 +2159,29 @@ alloc_retry: sc->sc_nvm_addrbits = 6; } sc->sc_flags |= WM_F_LOCK_EECD; + sc->nvm.acquire = wm_get_eecd; + sc->nvm.release = wm_put_eecd; break; case WM_T_82541: case WM_T_82541_2: case WM_T_82547: case WM_T_82547_2: - sc->sc_flags |= WM_F_LOCK_EECD; reg = CSR_READ(sc, WMREG_EECD); + /* + * 
wm_nvm_set_addrbits_size_eecd() accesses SPI in it only + * on 8254[17], so set flags and functios before calling it. + */ + sc->sc_flags |= WM_F_LOCK_EECD; + sc->nvm.acquire = wm_get_eecd; + sc->nvm.release = wm_put_eecd; if (reg & EECD_EE_TYPE) { /* SPI */ + sc->nvm.read = wm_nvm_read_spi; sc->sc_flags |= WM_F_EEPROM_SPI; wm_nvm_set_addrbits_size_eecd(sc); } else { /* Microwire */ + sc->nvm.read = wm_nvm_read_uwire; if ((reg & EECD_EE_ABITS) != 0) { sc->sc_nvm_wordsize = 256; sc->sc_nvm_addrbits = 8; @@ -2078,25 +2194,30 @@ alloc_retry: case WM_T_82571: case WM_T_82572: /* SPI */ + sc->nvm.read = wm_nvm_read_eerd; + /* Not use WM_F_LOCK_EECD because we use EERD */ sc->sc_flags |= WM_F_EEPROM_SPI; wm_nvm_set_addrbits_size_eecd(sc); - sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM; sc->phy.acquire = wm_get_swsm_semaphore; sc->phy.release = wm_put_swsm_semaphore; + sc->nvm.acquire = wm_get_nvm_82571; + sc->nvm.release = wm_put_nvm_82571; break; case WM_T_82573: case WM_T_82574: case WM_T_82583: + sc->nvm.read = wm_nvm_read_eerd; + /* Not use WM_F_LOCK_EECD because we use EERD */ if (sc->sc_type == WM_T_82573) { - sc->sc_flags |= WM_F_LOCK_SWSM; sc->phy.acquire = wm_get_swsm_semaphore; sc->phy.release = wm_put_swsm_semaphore; + sc->nvm.acquire = wm_get_nvm_82571; + sc->nvm.release = wm_put_nvm_82571; } else { - sc->sc_flags |= WM_F_LOCK_EXTCNF; /* Both PHY and NVM use the same semaphore. 
*/ - sc->phy.acquire + sc->phy.acquire = sc->nvm.acquire = wm_get_swfwhw_semaphore; - sc->phy.release + sc->phy.release = sc->nvm.release = wm_put_swfwhw_semaphore; } if (wm_nvm_is_onboard_eeprom(sc) == 0) { @@ -2107,7 +2228,6 @@ alloc_retry: sc->sc_flags |= WM_F_EEPROM_SPI; wm_nvm_set_addrbits_size_eecd(sc); } - sc->sc_flags |= WM_F_EEPROM_EERDEEWR; break; case WM_T_82575: case WM_T_82576: @@ -2118,10 +2238,18 @@ alloc_retry: /* SPI */ sc->sc_flags |= WM_F_EEPROM_SPI; wm_nvm_set_addrbits_size_eecd(sc); - sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW - | WM_F_LOCK_SWSM; + if ((sc->sc_type == WM_T_80003) + || (sc->sc_nvm_wordsize < (1 << 15))) { + sc->nvm.read = wm_nvm_read_eerd; + /* Don't use WM_F_LOCK_EECD because we use EERD */ + } else { + sc->nvm.read = wm_nvm_read_spi; + sc->sc_flags |= WM_F_LOCK_EECD; + } sc->phy.acquire = wm_get_phy_82575; sc->phy.release = wm_put_phy_82575; + sc->nvm.acquire = wm_get_nvm_80003; + sc->nvm.release = wm_put_nvm_80003; break; case WM_T_ICH8: case WM_T_ICH9: @@ -2129,8 +2257,9 @@ alloc_retry: case WM_T_PCH: case WM_T_PCH2: case WM_T_PCH_LPT: + sc->nvm.read = wm_nvm_read_ich8; /* FLASH */ - sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF; + sc->sc_flags |= WM_F_EEPROM_FLASH; sc->sc_nvm_wordsize = 2048; memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH); if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0, @@ -2150,45 +2279,53 @@ alloc_retry: sc->sc_flashreg_offset = 0; sc->phy.acquire = wm_get_swflag_ich8lan; sc->phy.release = wm_put_swflag_ich8lan; + sc->nvm.acquire = wm_get_nvm_ich8lan; + sc->nvm.release = wm_put_nvm_ich8lan; break; case WM_T_PCH_SPT: + case WM_T_PCH_CNP: + sc->nvm.read = wm_nvm_read_spt; /* SPT has no GFPREG; flash registers mapped through BAR0 */ - sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF; + sc->sc_flags |= WM_F_EEPROM_FLASH; sc->sc_flasht = sc->sc_st; sc->sc_flashh = sc->sc_sh; sc->sc_ich8_flash_base = 0; sc->sc_nvm_wordsize = - (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 
1) - * NVM_SIZE_MULTIPLIER; + (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1) + * NVM_SIZE_MULTIPLIER; /* It is size in bytes, we want words */ sc->sc_nvm_wordsize /= 2; - /* assume 2 banks */ + /* Assume 2 banks */ sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2; sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET; sc->phy.acquire = wm_get_swflag_ich8lan; sc->phy.release = wm_put_swflag_ich8lan; + sc->nvm.acquire = wm_get_nvm_ich8lan; + sc->nvm.release = wm_put_nvm_ich8lan; break; case WM_T_I210: case WM_T_I211: - if (wm_nvm_get_flash_presence_i210(sc)) { - wm_nvm_set_addrbits_size_eecd(sc); + /* Allow a single clear of the SW semaphore on I210 and newer*/ + sc->sc_flags |= WM_F_WA_I210_CLSEM; + if (wm_nvm_flash_presence_i210(sc)) { + sc->nvm.read = wm_nvm_read_eerd; + /* Don't use WM_F_LOCK_EECD because we use EERD */ sc->sc_flags |= WM_F_EEPROM_FLASH_HW; - sc->sc_flags |= WM_F_EEPROM_EERDEEWR; + wm_nvm_set_addrbits_size_eecd(sc); } else { - sc->sc_nvm_wordsize = INVM_SIZE; + sc->nvm.read = wm_nvm_read_invm; sc->sc_flags |= WM_F_EEPROM_INVM; + sc->sc_nvm_wordsize = INVM_SIZE; } - sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM; sc->phy.acquire = wm_get_phy_82575; sc->phy.release = wm_put_phy_82575; + sc->nvm.acquire = wm_get_nvm_80003; + sc->nvm.release = wm_put_nvm_80003; break; default: break; } - /* Reset the chip to a known state. */ - wm_reset(sc); - /* Ensure the SMBI bit is clear before first NVM or PHY access */ switch (sc->sc_type) { case WM_T_82571: @@ -2235,9 +2372,6 @@ alloc_retry: sc->sc_flags |= WM_F_EEPROM_INVALID; } - /* Set device properties (macflags) */ - prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags); - if (sc->sc_flags & WM_F_EEPROM_INVALID) aprint_verbose_dev(sc->sc_dev, "No EEPROM"); else { @@ -2261,12 +2395,48 @@ alloc_retry: wm_nvm_version(sc); aprint_verbose("\n"); - /* Check for I21[01] PLL workaround */ - if (sc->sc_type == WM_T_I210) + /* + * XXX The first call of wm_gmii_setup_phytype. The result might be + * incorrect. 
+ */ + wm_gmii_setup_phytype(sc, 0, 0); + + /* Check for WM_F_WOL on some chips before wm_reset() */ + switch (sc->sc_type) { + case WM_T_ICH8: + case WM_T_ICH9: + case WM_T_ICH10: + case WM_T_PCH: + case WM_T_PCH2: + case WM_T_PCH_LPT: + case WM_T_PCH_SPT: + case WM_T_PCH_CNP: + apme_mask = WUC_APME; + eeprom_data = CSR_READ(sc, WMREG_WUC); + if ((eeprom_data & apme_mask) != 0) + sc->sc_flags |= WM_F_WOL; + break; + default: + break; + } + + /* Reset the chip to a known state. */ + wm_reset(sc); + + /* + * Check for I21[01] PLL workaround. + * + * Three cases: + * a) Chip is I211. + * b) Chip is I210 and it uses INVM (not FLASH). + * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25 + */ + if (sc->sc_type == WM_T_I211) sc->sc_flags |= WM_F_PLL_WA_I210; - if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) { - /* NVM image release 3.25 has a workaround */ - if ((sc->sc_nvm_ver_major < 3) + if (sc->sc_type == WM_T_I210) { + if (!wm_nvm_flash_presence_i210(sc)) + sc->sc_flags |= WM_F_PLL_WA_I210; + else if ((sc->sc_nvm_ver_major < 3) || ((sc->sc_nvm_ver_major == 3) && (sc->sc_nvm_ver_minor < 25))) { aprint_verbose_dev(sc->sc_dev, @@ -2351,16 +2521,22 @@ alloc_retry: case WM_T_82574: case WM_T_82583: case WM_T_80003: - default: + case WM_T_82575: + case WM_T_82576: apme_mask = NVM_CFG3_APME; wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB : NVM_OFF_CFG3_PORTA, 1, &eeprom_data); break; - case WM_T_82575: - case WM_T_82576: case WM_T_82580: case WM_T_I350: - case WM_T_I354: /* XXX ok? 
*/ + case WM_T_I354: + case WM_T_I210: + case WM_T_I211: + apme_mask = NVM_CFG3_APME; + wm_nvm_read(sc, + NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA, + 1, &eeprom_data); + break; case WM_T_ICH8: case WM_T_ICH9: case WM_T_ICH10: @@ -2368,19 +2544,54 @@ alloc_retry: case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: - /* XXX The funcid should be checked on some devices */ - apme_mask = WUC_APME; - eeprom_data = CSR_READ(sc, WMREG_WUC); + case WM_T_PCH_CNP: + /* Already checked before wm_reset () */ + apme_mask = eeprom_data = 0; + break; + default: /* XXX 82540 */ + apme_mask = NVM_CFG3_APME; + wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data); break; } - /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */ if ((eeprom_data & apme_mask) != 0) sc->sc_flags |= WM_F_WOL; -#ifdef WM_DEBUG - if ((sc->sc_flags & WM_F_WOL) != 0) - printf("WOL\n"); -#endif + + /* + * We have the eeprom settings, now apply the special cases + * where the eeprom may be wrong or the board won't support + * wake on lan on a particular port + */ + switch (sc->sc_pcidevid) { + case PCI_PRODUCT_INTEL_82546GB_PCIE: + sc->sc_flags &= ~WM_F_WOL; + break; + case PCI_PRODUCT_INTEL_82546EB_FIBER: + case PCI_PRODUCT_INTEL_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting */ + if (sc->sc_funcid == 1) + sc->sc_flags &= ~WM_F_WOL; + break; + case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3: + /* If quad port adapter, disable WoL on all but port A */ + if (sc->sc_funcid != 0) + sc->sc_flags &= ~WM_F_WOL; + break; + case PCI_PRODUCT_INTEL_82571EB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting */ + if (sc->sc_funcid == 1) + sc->sc_flags &= ~WM_F_WOL; + break; + case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER: + case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER: + case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER: + /* If quad port adapter, disable WoL on all but port A */ + if (sc->sc_funcid 
!= 0) + sc->sc_flags &= ~WM_F_WOL; + break; + } if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) { /* Check NVM for autonegotiation */ @@ -2477,17 +2688,14 @@ alloc_retry: sc->sc_nvm_k1_enabled = 0; } - /* - * Determine if we're TBI,GMII or SGMII mode, and initialize the - * media structures accordingly. - */ + /* Determine if we're GMII, TBI, SERDES or SGMII mode */ if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT - || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573 + || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP + || sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) { - /* STATUS_TBIMODE reserved/reused, can't rely on it */ - wm_gmii_mediainit(sc, wmp->wmp_product); + /* Copper only */ } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350) || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210) @@ -2553,11 +2761,6 @@ alloc_retry: else reg &= ~CTRL_EXT_I2C_ENA; CSR_WRITE(sc, WMREG_CTRL_EXT, reg); - - if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) - wm_gmii_mediainit(sc, wmp->wmp_product); - else - wm_tbi_mediainit(sc); } else if (sc->sc_type < WM_T_82543 || (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) { @@ -2565,15 +2768,24 @@ alloc_retry: "WARNING: TBIMODE set on 1000BASE-T product!\n"); sc->sc_mediatype = WM_MEDIATYPE_FIBER; } - wm_tbi_mediainit(sc); } else { if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) { aprint_error_dev(sc->sc_dev, "WARNING: TBIMODE clear on 1000BASE-X product!\n"); sc->sc_mediatype = WM_MEDIATYPE_COPPER; } - wm_gmii_mediainit(sc, wmp->wmp_product); } + snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags); + aprint_verbose_dev(sc->sc_dev, "%s\n", buf); + + /* Set device properties (macflags) */ + 
prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags); + + /* Initialize the media structures accordingly. */ + if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) + wm_gmii_mediainit(sc, wmp->wmp_product); + else + wm_tbi_mediainit(sc); /* All others */ ifp = &sc->sc_ethercom.ec_if; xname = device_xname(sc->sc_dev); @@ -2581,7 +2793,7 @@ alloc_retry: ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; #ifdef WM_MPSAFE - ifp->if_extflags = IFEF_START_MPSAFE; + ifp->if_extflags = IFEF_MPSAFE; #endif ifp->if_ioctl = wm_ioctl; if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { @@ -2604,7 +2816,7 @@ alloc_retry: if (wm_is_using_multiqueue(sc)) ifp->if_transmit = wm_transmit; } - ifp->if_watchdog = wm_watchdog; + /* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */ ifp->if_init = wm_init; ifp->if_stop = wm_stop; IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN)); @@ -2621,11 +2833,12 @@ alloc_retry: case WM_T_82571: case WM_T_82572: case WM_T_82574: + case WM_T_82583: case WM_T_82575: case WM_T_82576: case WM_T_82580: case WM_T_I350: - case WM_T_I354: /* XXXX ok? */ + case WM_T_I354: case WM_T_I210: case WM_T_I211: case WM_T_80003: @@ -2634,6 +2847,7 @@ alloc_retry: case WM_T_PCH2: /* PCH2 supports 9K frame size */ case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: /* XXX limited to 9234 */ sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; break; @@ -2643,7 +2857,6 @@ alloc_retry: break; case WM_T_82542_2_0: case WM_T_82542_2_1: - case WM_T_82583: case WM_T_ICH8: /* No support for jumbo frame */ break; @@ -2694,6 +2907,8 @@ alloc_retry: ifp->if_capabilities |= IFCAP_TSOv6; } + sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT; + sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT; sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT; sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT; @@ -2704,13 +2919,16 @@ alloc_retry: #endif /* Attach the interface. 
*/ - if_initialize(ifp); + error = if_initialize(ifp); + if (error != 0) { + aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n", + error); + return; /* Error */ + } sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if); ether_ifattach(ifp, enaddr); - if_register(ifp); ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb); - rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, - RND_FLAG_DEFAULT); + if_register(ifp); #ifdef WM_EVENT_COUNTERS /* Attach event counters. */ @@ -2735,7 +2953,7 @@ alloc_retry: aprint_error_dev(self, "couldn't establish power handler\n"); sc->sc_flags |= WM_F_ATTACHED; - out: +out: return; } @@ -2841,8 +3059,33 @@ static bool wm_resume(device_t self, const pmf_qual_t *qual) { struct wm_softc *sc = device_private(self); + struct ifnet *ifp = &sc->sc_ethercom.ec_if; + pcireg_t reg; + char buf[256]; - wm_init_manageability(sc); + reg = CSR_READ(sc, WMREG_WUS); + if (reg != 0) { + snprintb(buf, sizeof(buf), WUS_FLAGS, reg); + device_printf(sc->sc_dev, "wakeup status %s\n", buf); + CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */ + } + + if (sc->sc_type >= WM_T_PCH2) + wm_resume_workarounds_pchlan(sc); + if ((ifp->if_flags & IFF_UP) == 0) { + wm_reset(sc); + /* Non-AMT based hardware can now take control from firmware */ + if ((sc->sc_flags & WM_F_HAS_AMT) == 0) + wm_get_hw_control(sc); + wm_init_manageability(sc); + } else { + /* + * We called pmf_class_network_register(), so if_init() is + * automatically called when IFF_UP. wm_reset(), + * wm_get_hw_control() and wm_init_manageability() are called + * via wm_init(). + */ + } return true; } @@ -2857,38 +3100,62 @@ wm_watchdog(struct ifnet *ifp) { int qid; struct wm_softc *sc = ifp->if_softc; + uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */ for (qid = 0; qid < sc->sc_nqueues; qid++) { struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq; - wm_watchdog_txq(ifp, txq); + wm_watchdog_txq(ifp, txq, &hang_queue); } - /* Reset the interface. 
*/ - (void) wm_init(ifp); + /* IF any of queues hanged up, reset the interface. */ + if (hang_queue != 0) { + (void)wm_init(ifp); - /* - * There are still some upper layer processing which call - * ifp->if_start(). e.g. ALTQ or one CPU system - */ - /* Try to get more packets going. */ - ifp->if_start(ifp); + /* + * There are still some upper layer processing which call + * ifp->if_start(). e.g. ALTQ or one CPU system + */ + /* Try to get more packets going. */ + ifp->if_start(ifp); + } +} + + +static void +wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang) +{ + + mutex_enter(txq->txq_lock); + if (txq->txq_sending && + time_uptime - txq->txq_lastsent > wm_watchdog_timeout) + wm_watchdog_txq_locked(ifp, txq, hang); + + mutex_exit(txq->txq_lock); } static void -wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq) +wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq, + uint16_t *hang) { struct wm_softc *sc = ifp->if_softc; + struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq); + + KASSERT(mutex_owned(txq->txq_lock)); /* * Since we're using delayed interrupts, sweep up * before we report an error. 
*/ - mutex_enter(txq->txq_lock); - wm_txeof(sc, txq); - mutex_exit(txq->txq_lock); + wm_txeof(txq, UINT_MAX); + + if (txq->txq_sending) + *hang |= __BIT(wmq->wmq_id); - if (txq->txq_free != WM_NTXDESC(txq)) { + if (txq->txq_free == WM_NTXDESC(txq)) { + log(LOG_ERR, "%s: device timeout (lost interrupt)\n", + device_xname(sc->sc_dev)); + } else { #ifdef WM_DEBUG int i, j; struct wm_txsoft *txs; @@ -2899,20 +3166,30 @@ wm_watchdog_txq(struct ifnet *ifp, struc txq->txq_next); ifp->if_oerrors++; #ifdef WM_DEBUG - for (i = txq->txq_sdirty; i != txq->txq_snext ; + for (i = txq->txq_sdirty; i != txq->txq_snext; i = WM_NEXTTXS(txq, i)) { - txs = &txq->txq_soft[i]; - printf("txs %d tx %d -> %d\n", - i, txs->txs_firstdesc, txs->txs_lastdesc); - for (j = txs->txs_firstdesc; ; - j = WM_NEXTTX(txq, j)) { - printf("\tdesc %d: 0x%" PRIx64 "\n", j, - txq->txq_nq_descs[j].nqtx_data.nqtxd_addr); - printf("\t %#08x%08x\n", - txq->txq_nq_descs[j].nqtx_data.nqtxd_fields, - txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen); - if (j == txs->txs_lastdesc) - break; + txs = &txq->txq_soft[i]; + printf("txs %d tx %d -> %d\n", + i, txs->txs_firstdesc, txs->txs_lastdesc); + for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) { + if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { + printf("\tdesc %d: 0x%" PRIx64 "\n", j, + txq->txq_nq_descs[j].nqtx_data.nqtxd_addr); + printf("\t %#08x%08x\n", + txq->txq_nq_descs[j].nqtx_data.nqtxd_fields, + txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen); + } else { + printf("\tdesc %d: 0x%" PRIx64 "\n", j, + (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 | + txq->txq_descs[j].wtx_addr.wa_low); + printf("\t %#04x%02x%02x%08x\n", + txq->txq_descs[j].wtx_fields.wtxu_vlan, + txq->txq_descs[j].wtx_fields.wtxu_options, + txq->txq_descs[j].wtx_fields.wtxu_status, + txq->txq_descs[j].wtx_cmdlen); + } + if (j == txs->txs_lastdesc) + break; } } #endif @@ -2936,8 +3213,13 @@ wm_tick(void *arg) WM_CORE_LOCK(sc); - if (sc->sc_core_stopping) - goto out; + if (sc->sc_core_stopping) { + 
WM_CORE_UNLOCK(sc); +#ifndef WM_MPSAFE + splx(s); +#endif + return; + } if (sc->sc_type >= WM_T_82542_2_1) { WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); @@ -2969,18 +3251,17 @@ wm_tick(void *arg) if (sc->sc_flags & WM_F_HAS_MII) mii_tick(&sc->sc_mii); - else if ((sc->sc_type >= WM_T_82575) + else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211) && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) wm_serdes_tick(sc); else wm_tbi_tick(sc); - callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); -out: WM_CORE_UNLOCK(sc); -#ifndef WM_MPSAFE - splx(s); -#endif + + wm_watchdog(ifp); + + callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); } static int @@ -2990,6 +3271,9 @@ wm_ifflags_cb(struct ethercom *ec) struct wm_softc *sc = ifp->if_softc; int rc = 0; + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + WM_CORE_LOCK(sc); int change = ifp->if_flags ^ sc->sc_if_flags; @@ -3020,7 +3304,7 @@ static int wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) { struct wm_softc *sc = ifp->if_softc; - struct ifreq *ifr = (struct ifreq *) data; + struct ifreq *ifr = (struct ifreq *)data; struct ifaddr *ifa = (struct ifaddr *)data; struct sockaddr_dl *sdl; int s, error; @@ -3033,7 +3317,6 @@ wm_ioctl(struct ifnet *ifp, u_long cmd, #endif switch (cmd) { case SIOCSIFMEDIA: - case SIOCGIFMEDIA: WM_CORE_LOCK(sc); /* Flow control requires full-duplex mode. 
*/ if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || @@ -3062,7 +3345,7 @@ wm_ioctl(struct ifnet *ifp, u_long cmd, sdl = satosdl(ifp->if_dl->ifa_addr); (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); - /* unicast address is first multicast entry */ + /* Unicast address is the first multicast entry */ wm_set_filter(sc); error = 0; WM_CORE_UNLOCK(sc); @@ -3084,9 +3367,9 @@ wm_ioctl(struct ifnet *ifp, u_long cmd, error = 0; - if (cmd == SIOCSIFCAP) { + if (cmd == SIOCSIFCAP) error = (*ifp->if_init)(ifp); - } else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) + else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) ; else if (ifp->if_flags & IFF_RUNNING) { /* @@ -3204,26 +3487,69 @@ wm_read_mac_addr(struct wm_softc *sc, ui static void wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) { - uint32_t ral_lo, ral_hi; + uint32_t ral_lo, ral_hi, addrl, addrh; + uint32_t wlock_mac; + int rv; if (enaddr != NULL) { - ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | - (enaddr[3] << 24); - ral_hi = enaddr[4] | (enaddr[5] << 8); + ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) | + ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24); + ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8); ral_hi |= RAL_AV; } else { ral_lo = 0; ral_hi = 0; } - if (sc->sc_type >= WM_T_82544) { - CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), - ral_lo); - CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), - ral_hi); - } else { - CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); - CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); + switch (sc->sc_type) { + case WM_T_82542_2_0: + case WM_T_82542_2_1: + case WM_T_82543: + CSR_WRITE(sc, WMREG_RAL(idx), ral_lo); + CSR_WRITE_FLUSH(sc); + CSR_WRITE(sc, WMREG_RAH(idx), ral_hi); + CSR_WRITE_FLUSH(sc); + break; + case WM_T_PCH2: + case WM_T_PCH_LPT: + case WM_T_PCH_SPT: + case WM_T_PCH_CNP: + if (idx == 0) { + CSR_WRITE(sc, 
WMREG_CORDOVA_RAL(idx), ral_lo); + CSR_WRITE_FLUSH(sc); + CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi); + CSR_WRITE_FLUSH(sc); + return; + } + if (sc->sc_type != WM_T_PCH2) { + wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), + FWSM_WLOCK_MAC); + addrl = WMREG_SHRAL(idx - 1); + addrh = WMREG_SHRAH(idx - 1); + } else { + wlock_mac = 0; + addrl = WMREG_PCH_LPT_SHRAL(idx - 1); + addrh = WMREG_PCH_LPT_SHRAH(idx - 1); + } + + if ((wlock_mac == 0) || (idx <= wlock_mac)) { + rv = wm_get_swflag_ich8lan(sc); + if (rv != 0) + return; + CSR_WRITE(sc, addrl, ral_lo); + CSR_WRITE_FLUSH(sc); + CSR_WRITE(sc, addrh, ral_hi); + CSR_WRITE_FLUSH(sc); + wm_put_swflag_ich8lan(sc); + } + + break; + default: + CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo); + CSR_WRITE_FLUSH(sc); + CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi); + CSR_WRITE_FLUSH(sc); + break; } } @@ -3245,39 +3571,85 @@ wm_mchash(struct wm_softc *sc, const uin if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) - || (sc->sc_type == WM_T_PCH_SPT)) { + || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){ hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | - (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); + (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); return (hash & 0x3ff); } hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | - (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); + (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]); return (hash & 0xfff); } /* - * wm_set_filter: * - * Set up the receive filter. 
+ * */ -static void -wm_set_filter(struct wm_softc *sc) +static int +wm_rar_count(struct wm_softc *sc) { - struct ethercom *ec = &sc->sc_ethercom; - struct ifnet *ifp = &sc->sc_ethercom.ec_if; - struct ether_multi *enm; - struct ether_multistep step; - bus_addr_t mta_reg; - uint32_t hash, reg, bit; - int i, size, ralmax; - - DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", - device_xname(sc->sc_dev), __func__)); + int size; - if (sc->sc_type >= WM_T_82544) - mta_reg = WMREG_CORDOVA_MTA; - else + switch (sc->sc_type) { + case WM_T_ICH8: + size = WM_RAL_TABSIZE_ICH8 -1; + break; + case WM_T_ICH9: + case WM_T_ICH10: + case WM_T_PCH: + size = WM_RAL_TABSIZE_ICH8; + break; + case WM_T_PCH2: + size = WM_RAL_TABSIZE_PCH2; + break; + case WM_T_PCH_LPT: + case WM_T_PCH_SPT: + case WM_T_PCH_CNP: + size = WM_RAL_TABSIZE_PCH_LPT; + break; + case WM_T_82575: + case WM_T_I210: + case WM_T_I211: + size = WM_RAL_TABSIZE_82575; + break; + case WM_T_82576: + case WM_T_82580: + size = WM_RAL_TABSIZE_82576; + break; + case WM_T_I350: + case WM_T_I354: + size = WM_RAL_TABSIZE_I350; + break; + default: + size = WM_RAL_TABSIZE; + } + + return size; +} + +/* + * wm_set_filter: + * + * Set up the receive filter. + */ +static void +wm_set_filter(struct wm_softc *sc) +{ + struct ethercom *ec = &sc->sc_ethercom; + struct ifnet *ifp = &sc->sc_ethercom.ec_if; + struct ether_multi *enm; + struct ether_multistep step; + bus_addr_t mta_reg; + uint32_t hash, reg, bit; + int i, size, ralmax; + + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + + if (sc->sc_type >= WM_T_82544) + mta_reg = WMREG_CORDOVA_MTA; + else mta_reg = WMREG_MTA; sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); @@ -3293,26 +3665,11 @@ wm_set_filter(struct wm_softc *sc) * Set the station address in the first RAL slot, and * clear the remaining slots. 
*/ - if (sc->sc_type == WM_T_ICH8) - size = WM_RAL_TABSIZE_ICH8 -1; - else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) - || (sc->sc_type == WM_T_PCH)) - size = WM_RAL_TABSIZE_ICH8; - else if (sc->sc_type == WM_T_PCH2) - size = WM_RAL_TABSIZE_PCH2; - else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT)) - size = WM_RAL_TABSIZE_PCH_LPT; - else if (sc->sc_type == WM_T_82575) - size = WM_RAL_TABSIZE_82575; - else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580)) - size = WM_RAL_TABSIZE_82576; - else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) - size = WM_RAL_TABSIZE_I350; - else - size = WM_RAL_TABSIZE; + size = wm_rar_count(sc); wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0); - if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) { + if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) + || (sc->sc_type == WM_T_PCH_CNP)) { i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC); switch (i) { case 0: @@ -3324,7 +3681,7 @@ wm_set_filter(struct wm_softc *sc) ralmax = 1; break; default: - /* available SHRA + RAR[0] */ + /* Available SHRA + RAR[0] */ ralmax = i + 1; } } else @@ -3337,13 +3694,15 @@ wm_set_filter(struct wm_softc *sc) if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) - || (sc->sc_type == WM_T_PCH_SPT)) + || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) size = WM_ICH8_MC_TABSIZE; else size = WM_MC_TABSIZE; /* Clear out the multicast table. 
*/ - for (i = 0; i < size; i++) + for (i = 0; i < size; i++) { CSR_WRITE(sc, mta_reg + (i << 2), 0); + CSR_WRITE_FLUSH(sc); + } ETHER_LOCK(ec); ETHER_FIRST_MULTI(step, ec, enm); @@ -3368,7 +3727,8 @@ wm_set_filter(struct wm_softc *sc) || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) - || (sc->sc_type == WM_T_PCH_SPT)) + || (sc->sc_type == WM_T_PCH_SPT) + || (sc->sc_type == WM_T_PCH_CNP)) reg &= 0x1f; else reg &= 0x7f; @@ -3385,9 +3745,13 @@ wm_set_filter(struct wm_softc *sc) */ bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); CSR_WRITE(sc, mta_reg + (reg << 2), hash); + CSR_WRITE_FLUSH(sc); CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); - } else + CSR_WRITE_FLUSH(sc); + } else { CSR_WRITE(sc, mta_reg + (reg << 2), hash); + CSR_WRITE_FLUSH(sc); + } ETHER_NEXT_MULTI(step, enm); } @@ -3506,6 +3870,7 @@ wm_lan_init_done(struct wm_softc *sc) case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) { reg = CSR_READ(sc, WMREG_STATUS); if ((reg & STATUS_LAN_INIT_DONE) != 0) @@ -3580,10 +3945,9 @@ wm_get_cfg_done(struct wm_softc *sc) break; delay(1000); } - if (i >= WM_PHY_CFG_TIMEOUT) { + if (i >= WM_PHY_CFG_TIMEOUT) DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n", device_xname(sc->sc_dev), __func__)); - } break; case WM_T_ICH8: case WM_T_ICH9: @@ -3592,12 +3956,14 @@ wm_get_cfg_done(struct wm_softc *sc) case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: delay(10*1000); if (sc->sc_type >= WM_T_ICH10) wm_lan_init_done(sc); else wm_get_auto_rd_done(sc); + /* Clear PHY Reset Asserted bit */ reg = CSR_READ(sc, WMREG_STATUS); if ((reg & STATUS_PHYRA) != 0) CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA); @@ -3609,6 +3975,262 @@ wm_get_cfg_done(struct wm_softc *sc) } } +void +wm_phy_post_reset(struct wm_softc *sc) +{ + uint32_t reg; + + /* This function is only for ICH8 and newer. 
*/ + if (sc->sc_type < WM_T_ICH8) + return; + + if (wm_phy_resetisblocked(sc)) { + /* XXX */ + device_printf(sc->sc_dev, "PHY is blocked\n"); + return; + } + + /* Allow time for h/w to get to quiescent state after reset */ + delay(10*1000); + + /* Perform any necessary post-reset workarounds */ + if (sc->sc_type == WM_T_PCH) + wm_hv_phy_workarounds_ich8lan(sc); + else if (sc->sc_type == WM_T_PCH2) + wm_lv_phy_workarounds_ich8lan(sc); + + /* Clear the host wakeup bit after lcd reset */ + if (sc->sc_type >= WM_T_PCH) { + reg = wm_gmii_hv_readreg(sc->sc_dev, 2, + BM_PORT_GEN_CFG); + reg &= ~BM_WUC_HOST_WU_BIT; + wm_gmii_hv_writereg(sc->sc_dev, 2, + BM_PORT_GEN_CFG, reg); + } + + /* Configure the LCD with the extended configuration region in NVM */ + wm_init_lcd_from_nvm(sc); + + /* Configure the LCD with the OEM bits in NVM */ + wm_oem_bits_config_ich8lan(sc, true); + + if (sc->sc_type == WM_T_PCH2) { + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) { + delay(10 * 1000); + wm_gate_hw_phy_config_ich8lan(sc, false); + } + /* XXX Set EEE LPI Update Timer to 200usec */ + } +} + +/* Only for PCH and newer */ +static int +wm_write_smbus_addr(struct wm_softc *sc) +{ + uint32_t strap, freq; + uint16_t phy_data; + int rv; + + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP); + + strap = CSR_READ(sc, WMREG_STRAP); + freq = __SHIFTOUT(strap, STRAP_FREQ); + + rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data); + if (rv != 0) + return -1; + + phy_data &= ~HV_SMB_ADDR_ADDR; + phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + + if (sc->sc_phytype == WMPHY_I217) { + /* Restore SMBus frequency */ + if (freq --) { + phy_data &= ~(HV_SMB_ADDR_FREQ_LOW + | HV_SMB_ADDR_FREQ_HIGH); + phy_data |= __SHIFTIN((freq & 0x01) != 0, + 
HV_SMB_ADDR_FREQ_LOW); + phy_data |= __SHIFTIN((freq & 0x02) != 0, + HV_SMB_ADDR_FREQ_HIGH); + } else + DPRINTF(WM_DEBUG_INIT, + ("%s: %s Unsupported SMB frequency in PHY\n", + device_xname(sc->sc_dev), __func__)); + } + + return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, + phy_data); +} + +void +wm_init_lcd_from_nvm(struct wm_softc *sc) +{ + uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg; + uint16_t phy_page = 0; + + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + + switch (sc->sc_type) { + case WM_T_ICH8: + if ((sc->sc_phytype == WMPHY_UNKNOWN) + || (sc->sc_phytype != WMPHY_IGP_3)) + return; + + if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT) + || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) { + sw_cfg_mask = FEXTNVM_SW_CONFIG; + break; + } + /* FALLTHROUGH */ + case WM_T_PCH: + case WM_T_PCH2: + case WM_T_PCH_LPT: + case WM_T_PCH_SPT: + case WM_T_PCH_CNP: + sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M; + break; + default: + return; + } + + sc->phy.acquire(sc); + + reg = CSR_READ(sc, WMREG_FEXTNVM); + if ((reg & sw_cfg_mask) == 0) + goto release; + + /* + * Make sure HW does not configure LCD from PHY extended configuration + * before SW configuration + */ + extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR); + if ((sc->sc_type < WM_T_PCH2) + && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0)) + goto release; + + DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n", + device_xname(sc->sc_dev), __func__)); + /* word_addr is in DWORD */ + word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1; + + reg = CSR_READ(sc, WMREG_EXTCNFSIZE); + cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH); + if (cnf_size == 0) + goto release; + + if (((sc->sc_type == WM_T_PCH) + && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0)) + || (sc->sc_type > WM_T_PCH)) { + /* + * HW configures the SMBus address and LEDs when the OEM and + * LCD Write Enable bits are set in the NVM. 
When both NVM bits + * are cleared, SW will configure them instead. + */ + DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n", + device_xname(sc->sc_dev), __func__)); + wm_write_smbus_addr(sc); + + reg = CSR_READ(sc, WMREG_LEDCTL); + wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg); + } + + /* Configure LCD from extended configuration region. */ + for (i = 0; i < cnf_size; i++) { + uint16_t reg_data, reg_addr; + + if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0) + goto release; + + if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0) + goto release; + + if (reg_addr == MII_IGPHY_PAGE_SELECT) + phy_page = reg_data; + + reg_addr &= IGPHY_MAXREGADDR; + reg_addr |= phy_page; + + KASSERT(sc->phy.writereg_locked != NULL); + sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, reg_data); + } + +release: + sc->phy.release(sc); + return; +} + +/* + * wm_oem_bits_config_ich8lan - SW-based LCD Configuration + * @sc: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. 
+ */ +int +wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state) +{ + uint32_t mac_reg; + uint16_t oem_reg; + int rv; + + if (sc->sc_type < WM_T_PCH) + return 0; + + rv = sc->phy.acquire(sc); + if (rv != 0) + return rv; + + if (sc->sc_type == WM_T_PCH) { + mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR); + if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0) + goto release; + } + + mac_reg = CSR_READ(sc, WMREG_FEXTNVM); + if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0) + goto release; + + mac_reg = CSR_READ(sc, WMREG_PHY_CTRL); + + rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg); + if (rv != 0) + goto release; + oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if ((mac_reg & PHY_CTRL_GBE_DIS) != 0) + oem_reg |= HV_OEM_BITS_A1KDIS; + if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS)) + != 0) + oem_reg |= HV_OEM_BITS_A1KDIS; + if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU)) + != 0) + oem_reg |= HV_OEM_BITS_LPLU; + } + + /* Set Restart auto-neg to activate the bits */ + if ((d0_state || (sc->sc_type != WM_T_PCH)) + && (wm_phy_resetisblocked(sc) == false)) + oem_reg |= HV_OEM_BITS_ANEGNOW; + + rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg); + +release: + sc->phy.release(sc); + + return rv; +} + /* Init hardware bits */ void wm_initialize_hardware_bits(struct wm_softc *sc) @@ -3747,11 +4369,20 @@ wm_initialize_hardware_bits(struct wm_so case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: /* TARC0 */ - if ((sc->sc_type == WM_T_ICH8) - || (sc->sc_type == WM_T_PCH_SPT)) { + if (sc->sc_type == WM_T_ICH8) { /* Set TARC0 bits 29 and 28 */ tarc0 |= __BITS(29, 28); + } else if (sc->sc_type == WM_T_PCH_SPT) { + tarc0 |= __BIT(29); + /* + * Drop bit 28. From Linux. + * See I218/I219 spec update + * "5. 
Buffer Overrun While the I219 is + * Processing DMA Transactions" + */ + tarc0 &= ~__BIT(28); } /* Set TARC0 bits 23,24,26,27 */ tarc0 |= __BITS(27, 26) | __BITS(24, 23); @@ -3820,7 +4451,7 @@ wm_initialize_hardware_bits(struct wm_so CSR_WRITE(sc, WMREG_RFCTL, reg); break; case WM_T_82574: - /* use extened Rx descriptor. */ + /* Use extened Rx descriptor. */ reg = CSR_READ(sc, WMREG_RFCTL); reg |= WMREG_RFCTL_EXSTEN; CSR_WRITE(sc, WMREG_RFCTL, reg); @@ -3862,7 +4493,7 @@ wm_rxpbs_adjust_82580(uint32_t val) * generic PHY reset function. * Same as e1000_phy_hw_reset_generic() */ -static void +static int wm_reset_phy(struct wm_softc *sc) { uint32_t reg; @@ -3870,7 +4501,7 @@ wm_reset_phy(struct wm_softc *sc) DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); if (wm_phy_resetisblocked(sc)) - return; + return -1; sc->phy.acquire(sc); @@ -3884,18 +4515,28 @@ wm_reset_phy(struct wm_softc *sc) CSR_WRITE_FLUSH(sc); delay(150); - + sc->phy.release(sc); wm_get_cfg_done(sc); + wm_phy_post_reset(sc); + + return 0; } +/* + * Only used by WM_T_PCH_SPT which does not use multiqueue, + * so it is enough to check sc->sc_queue[0] only. 
+ */ static void wm_flush_desc_rings(struct wm_softc *sc) { pcireg_t preg; uint32_t reg; + struct wm_txqueue *txq; + wiseman_txdesc_t *txd; int nexttx; + uint32_t rctl; /* First, disable MULR fix in FEXTNVM11 */ reg = CSR_READ(sc, WMREG_FEXTNVM11); @@ -3904,66 +4545,60 @@ wm_flush_desc_rings(struct wm_softc *sc) preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS); reg = CSR_READ(sc, WMREG_TDLEN(0)); - if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) { - struct wm_txqueue *txq; - wiseman_txdesc_t *txd; - - /* TX */ - printf("%s: Need TX flush (reg = %08x, len = %u)\n", - device_xname(sc->sc_dev), preg, reg); - reg = CSR_READ(sc, WMREG_TCTL); - CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN); + if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0)) + return; + + /* TX */ + device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n", + preg, reg); + reg = CSR_READ(sc, WMREG_TCTL); + CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN); + + txq = &sc->sc_queue[0].wmq_txq; + nexttx = txq->txq_next; + txd = &txq->txq_descs[nexttx]; + wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx)); + txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512); + txd->wtx_fields.wtxu_status = 0; + txd->wtx_fields.wtxu_options = 0; + txd->wtx_fields.wtxu_vlan = 0; + + bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0, + BUS_SPACE_BARRIER_WRITE); + + txq->txq_next = WM_NEXTTX(txq, txq->txq_next); + CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next); + bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0, + BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); + delay(250); - txq = &sc->sc_queue[0].wmq_txq; - nexttx = txq->txq_next; - txd = &txq->txq_descs[nexttx]; - wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx)); - txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512); - txd->wtx_fields.wtxu_status = 0; - txd->wtx_fields.wtxu_options = 0; - txd->wtx_fields.wtxu_vlan = 0; - - bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0, - BUS_SPACE_BARRIER_WRITE); - - txq->txq_next = WM_NEXTTX(txq, 
txq->txq_next); - CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next); - bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0, - BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); - delay(250); - } preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS); - if (preg & DESCRING_STATUS_FLUSH_REQ) { - uint32_t rctl; + if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0) + return; - /* RX */ - printf("%s: Need RX flush (reg = %08x)\n", - device_xname(sc->sc_dev), preg); - rctl = CSR_READ(sc, WMREG_RCTL); - CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN); - CSR_WRITE_FLUSH(sc); - delay(150); + /* RX */ + device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg); + rctl = CSR_READ(sc, WMREG_RCTL); + CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN); + CSR_WRITE_FLUSH(sc); + delay(150); - reg = CSR_READ(sc, WMREG_RXDCTL(0)); - /* zero the lower 14 bits (prefetch and host thresholds) */ - reg &= 0xffffc000; - /* - * update thresholds: prefetch threshold to 31, host threshold - * to 1 and make sure the granularity is "descriptors" and not - * "cache lines" - */ - reg |= (0x1f | (1 << 8) | RXDCTL_GRAN); - CSR_WRITE(sc, WMREG_RXDCTL(0), reg); + reg = CSR_READ(sc, WMREG_RXDCTL(0)); + /* Zero the lower 14 bits (prefetch and host thresholds) */ + reg &= 0xffffc000; + /* + * Update thresholds: prefetch threshold to 31, host threshold + * to 1 and make sure the granularity is "descriptors" and not + * "cache lines" + */ + reg |= (0x1f | (1 << 8) | RXDCTL_GRAN); + CSR_WRITE(sc, WMREG_RXDCTL(0), reg); - /* - * momentarily enable the RX ring for the changes to take - * effect - */ - CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN); - CSR_WRITE_FLUSH(sc); - delay(150); - CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN); - } + /* Momentarily enable the RX ring for the changes to take effect */ + CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN); + CSR_WRITE_FLUSH(sc); + delay(150); + CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN); } /* @@ -3977,6 +4612,8 @@ wm_reset(struct wm_softc *sc) int phy_reset = 0; int i, error = 0; 
uint32_t reg; + uint16_t kmreg; + int rv; DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); @@ -3997,7 +4634,7 @@ wm_reset(struct wm_softc *sc) txq->txq_fifo_head = 0; txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; txq->txq_fifo_size = - (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; + (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; txq->txq_fifo_stall = 0; } break; @@ -4038,9 +4675,10 @@ wm_reset(struct wm_softc *sc) PBA_14K : PBA_10K; break; case WM_T_PCH: - case WM_T_PCH2: + case WM_T_PCH2: /* XXX 14K? */ case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: sc->sc_pba = PBA_26K; break; default: @@ -4069,6 +4707,9 @@ wm_reset(struct wm_softc *sc) break; delay(100); } + if (timeout == 0) + device_printf(sc->sc_dev, + "failed to disable busmastering\n"); } /* Set the completion timeout for interface */ @@ -4084,9 +4725,8 @@ wm_reset(struct wm_softc *sc) if (sc->sc_type != WM_T_82574) { CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); CSR_WRITE(sc, WMREG_EIAC, 0); - } else { + } else CSR_WRITE(sc, WMREG_EIAC_82574, 0); - } } /* Stop the transmit and receive processes. */ @@ -4131,12 +4771,12 @@ wm_reset(struct wm_softc *sc) /* * On some chipsets, a reset through a memory-mapped write * cycle can cause the chip to reset before completing the - * write cycle. This causes major headache that can be - * avoided by issuing the reset via indirect register writes - * through I/O space. + * write cycle. This causes major headache that can be avoided + * by issuing the reset via indirect register writes through + * I/O space. * * So, if we successfully mapped the I/O BAR at attach time, - * use that. Otherwise, try our luck with a memory-mapped + * use that. Otherwise, try our luck with a memory-mapped * reset. 
*/ if (sc->sc_flags & WM_F_IOH_VALID) @@ -4162,6 +4802,7 @@ wm_reset(struct wm_softc *sc) case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; if (wm_phy_resetisblocked(sc) == false) { /* @@ -4176,7 +4817,7 @@ wm_reset(struct wm_softc *sc) reg |= CTRL_PHY_RESET; phy_reset = 1; } else - printf("XXX reset is blocked!!!\n"); + device_printf(sc->sc_dev, "XXX reset is blocked!!!\n"); sc->phy.acquire(sc); CSR_WRITE(sc, WMREG_CTRL, reg); /* Don't insert a completion barrier when reset */ @@ -4224,10 +4865,18 @@ wm_reset(struct wm_softc *sc) break; } + /* Set Phy Config Counter to 50msec */ + if (sc->sc_type == WM_T_PCH2) { + reg = CSR_READ(sc, WMREG_FEXTNVM3); + reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK; + reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS; + CSR_WRITE(sc, WMREG_FEXTNVM3, reg); + } + if (phy_reset != 0) wm_get_cfg_done(sc); - /* reload EEPROM */ + /* Reload EEPROM */ switch (sc->sc_type) { case WM_T_82542_2_0: case WM_T_82542_2_1: @@ -4293,6 +4942,7 @@ wm_reset(struct wm_softc *sc) case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: break; default: panic("%s: unknown type\n", __func__); @@ -4318,9 +4968,12 @@ wm_reset(struct wm_softc *sc) break; } + if (phy_reset != 0) + wm_phy_post_reset(sc); + if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) { - /* clear global device reset status bit */ + /* Clear global device reset status bit */ CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); } @@ -4335,21 +4988,26 @@ wm_reset(struct wm_softc *sc) CSR_WRITE(sc, WMREG_EIAC_82574, 0); } - /* reload sc_ctrl */ - sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); - - if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)) - wm_set_eee_i350(sc); - - /* Clear the host wakeup bit after lcd reset */ - if (sc->sc_type >= WM_T_PCH) { - reg = wm_gmii_hv_readreg(sc->sc_dev, 2, - BM_PORT_GEN_CFG); - reg &= ~BM_WUC_HOST_WU_BIT; - wm_gmii_hv_writereg(sc->sc_dev, 2, - 
BM_PORT_GEN_CFG, reg); + if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) + || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) + || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) + || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){ + reg = CSR_READ(sc, WMREG_KABGTXD); + reg |= KABGTXD_BGSQLBIAS; + CSR_WRITE(sc, WMREG_KABGTXD, reg); } + /* Reload sc_ctrl */ + sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); + + if (sc->sc_type == WM_T_I354) { +#if 0 + /* I354 uses an external PHY */ + wm_set_eee_i354(sc); +#endif + } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)) + wm_set_eee_i350(sc); + /* * For PCH, this write will make sure that any noise will be detected * as a CRC error and be dropped rather than show up as a bad packet @@ -4361,10 +5019,28 @@ wm_reset(struct wm_softc *sc) if (sc->sc_type >= WM_T_82544) CSR_WRITE(sc, WMREG_WUC, 0); + if (sc->sc_type < WM_T_82575) + wm_disable_aspm(sc); /* Workaround for some chips */ + wm_reset_mdicnfg_82580(sc); if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) wm_pll_workaround_i210(sc); + + if (sc->sc_type == WM_T_80003) { + /* Default to TRUE to enable the MDIC W/A */ + sc->sc_flags |= WM_F_80003_MDIC_WA; + + rv = wm_kmrn_readreg(sc, + KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg); + if (rv == 0) { + if ((kmreg & KUMCTRLSTA_OPMODE_MASK) + == KUMCTRLSTA_OPMODE_INBAND_MDIO) + sc->sc_flags &= ~WM_F_80003_MDIC_WA; + else + sc->sc_flags |= WM_F_80003_MDIC_WA; + } + } } /* @@ -4403,8 +5079,7 @@ wm_add_rxbuf(struct wm_rxqueue *rxq, int if (error) { /* XXX XXX XXX */ aprint_error_dev(sc->sc_dev, - "unable to load rx DMA map %d, error = %d\n", - idx, error); + "unable to load rx DMA map %d, error = %d\n", idx, error); panic("wm_add_rxbuf"); } @@ -4444,43 +5119,6 @@ wm_rxdrain(struct wm_rxqueue *rxq) } } - -/* - * XXX copy from FreeBSD's sys/net/rss_config.c - */ -/* - * RSS secret key, intended to prevent attacks on load-balancing. 
Its - * effectiveness may be limited by algorithm choice and available entropy - * during the boot. - * - * XXXRW: And that we don't randomize it yet! - * - * This is the default Microsoft RSS specification key which is also - * the Chelsio T5 firmware default key. - */ -#define RSS_KEYSIZE 40 -static uint8_t wm_rss_key[RSS_KEYSIZE] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, - 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, - 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, - 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, - 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa, -}; - -/* - * Caller must pass an array of size sizeof(rss_key). - * - * XXX - * As if_ixgbe may use this function, this function should not be - * if_wm specific function. - */ -static void -wm_rss_getkey(uint8_t *key) -{ - - memcpy(key, wm_rss_key, sizeof(wm_rss_key)); -} - /* * Setup registers for RSS. * @@ -4492,13 +5130,13 @@ wm_init_rss(struct wm_softc *sc) uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS]; int i; - CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key)); + CTASSERT(sizeof(rss_key) == RSS_KEYSIZE); for (i = 0; i < RETA_NUM_ENTRIES; i++) { - int qid, reta_ent; + unsigned int qid, reta_ent; qid = i % sc->sc_nqueues; - switch(sc->sc_type) { + switch (sc->sc_type) { case WM_T_82574: reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK_82574); @@ -4518,7 +5156,7 @@ wm_init_rss(struct wm_softc *sc) CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg); } - wm_rss_getkey((uint8_t *)rss_key); + rss_getkey((uint8_t *)rss_key); for (i = 0; i < RSSRK_NUM_REGS; i++) CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]); @@ -4557,7 +5195,7 @@ wm_adjust_qnum(struct wm_softc *sc, int return; } - switch(sc->sc_type) { + switch (sc->sc_type) { case WM_T_82572: hw_ntxqueues = 2; hw_nrxqueues = 2; @@ -4611,11 +5249,10 @@ wm_adjust_qnum(struct wm_softc *sc, int * As queues more than MSI-X vectors cannot improve scaling, we limit * the number of queues used actually. 
*/ - if (nvectors < hw_nqueues + 1) { + if (nvectors < hw_nqueues + 1) sc->sc_nqueues = nvectors - 1; - } else { + else sc->sc_nqueues = hw_nqueues; - } /* * As queues more then cpus cannot improve scaling, we limit @@ -4779,9 +5416,7 @@ wm_setup_msix(struct wm_softc *sc) intr_idx++; } - /* - * LINK - */ + /* LINK */ intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, sizeof(intrbuf)); #ifdef WM_MPSAFE @@ -4791,7 +5426,7 @@ wm_setup_msix(struct wm_softc *sc) snprintf(intr_xname, sizeof(intr_xname), "%sLINK", device_xname(sc->sc_dev)); vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], - IPL_NET, wm_linkintr_msix, sc, intr_xname); + IPL_NET, wm_linkintr_msix, sc, intr_xname); if (vih == NULL) { aprint_error_dev(sc->sc_dev, "unable to establish MSI-X(for LINK)%s%s\n", @@ -4800,7 +5435,7 @@ wm_setup_msix(struct wm_softc *sc) goto fail; } - /* keep default affinity to LINK interrupt */ + /* Keep default affinity to LINK interrupt */ aprint_normal_dev(sc->sc_dev, "for LINK interrupting at %s\n", intrstr); sc->sc_ihs[intr_idx] = vih; @@ -4822,16 +5457,14 @@ wm_setup_msix(struct wm_softc *sc) } static void -wm_turnon(struct wm_softc *sc) +wm_unset_stopping_flags(struct wm_softc *sc) { int i; KASSERT(WM_CORE_LOCKED(sc)); - /* - * must unset stopping flags in ascending order. - */ - for(i = 0; i < sc->sc_nqueues; i++) { + /* Must unset stopping flags in ascending order. */ + for (i = 0; i < sc->sc_nqueues; i++) { struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; @@ -4848,7 +5481,7 @@ wm_turnon(struct wm_softc *sc) } static void -wm_turnoff(struct wm_softc *sc) +wm_set_stopping_flags(struct wm_softc *sc) { int i; @@ -4856,10 +5489,8 @@ wm_turnoff(struct wm_softc *sc) sc->sc_core_stopping = true; - /* - * must set stopping flags in ascending order. - */ - for(i = 0; i < sc->sc_nqueues; i++) { + /* Must set stopping flags in ascending order. 
*/ + for (i = 0; i < sc->sc_nqueues; i++) { struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; @@ -4874,7 +5505,7 @@ wm_turnoff(struct wm_softc *sc) } /* - * write interrupt interval value to ITR or EITR + * Write interrupt interval value to ITR or EITR */ static void wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq) @@ -4902,7 +5533,7 @@ wm_itrs_writereg(struct wm_softc *sc, st * the multi queue function with MSI-X. */ CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx), - wmq->wmq_itr & EITR_ITR_INT_MASK_82574); + wmq->wmq_itr & EITR_ITR_INT_MASK_82574); } else { KASSERT(wmq->wmq_id == 0); CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr); @@ -5025,7 +5656,7 @@ wm_init_locked(struct ifnet *ifp) /* Cancel any pending I/O. */ wm_stop_locked(ifp, 0); - /* update statistics before reset */ + /* Update statistics before reset */ ifp->if_collisions += CSR_READ(sc, WMREG_COLC); ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); @@ -5036,10 +5667,17 @@ wm_init_locked(struct ifnet *ifp) /* Reset the chip to a known state. */ wm_reset(sc); - /* AMT based hardware can now take control from firmware */ + /* + * AMT based hardware can now take control from firmware + * Do this after reset. + */ if ((sc->sc_flags & WM_F_HAS_AMT) != 0) wm_get_hw_control(sc); + if ((sc->sc_type >= WM_T_PCH_SPT) && + pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX) + wm_legacy_irq_quirk_spt(sc); + /* Init hardware bits */ wm_initialize_hardware_bits(sc); @@ -5047,6 +5685,27 @@ wm_init_locked(struct ifnet *ifp) if (sc->sc_flags & WM_F_HAS_MII) wm_gmii_reset(sc); + if (sc->sc_type >= WM_T_ICH8) { + reg = CSR_READ(sc, WMREG_GCR); + /* + * ICH8 No-snoop bits are opposite polarity. Set to snoop by + * default after reset. 
+ */ + if (sc->sc_type == WM_T_ICH8) + reg |= GCR_NO_SNOOP_ALL; + else + reg &= ~GCR_NO_SNOOP_ALL; + CSR_WRITE(sc, WMREG_GCR, reg); + } + if ((sc->sc_type >= WM_T_ICH8) + || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER) + || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) { + + reg = CSR_READ(sc, WMREG_CTRL_EXT); + reg |= CTRL_EXT_RO_DIS; + CSR_WRITE(sc, WMREG_CTRL_EXT, reg); + } + /* Calculate (E)ITR value */ if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) { /* @@ -5087,9 +5746,7 @@ wm_init_locked(struct ifnet *ifp) if (error) goto out; - /* - * Clear out the VLAN table -- we don't use it (yet). - */ + /* Clear out the VLAN table -- we don't use it (yet). */ CSR_WRITE(sc, WMREG_VET, 0); if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) trynum = 10; /* Due to hw errata */ @@ -5107,7 +5764,7 @@ wm_init_locked(struct ifnet *ifp) if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT) - && (sc->sc_type != WM_T_PCH_SPT)) { + && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){ CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); @@ -5131,7 +5788,7 @@ wm_init_locked(struct ifnet *ifp) wm_set_vlan(sc); if (sc->sc_flags & WM_F_HAS_MII) { - int val; + uint16_t kmreg; switch (sc->sc_type) { case WM_T_80003: @@ -5142,6 +5799,7 @@ wm_init_locked(struct ifnet *ifp) case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: /* * Set the mac to wait the maximum time between each * iteration and increase the max iterations when @@ -5150,19 +5808,20 @@ wm_init_locked(struct ifnet *ifp) */ wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF); - val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM); - val |= 0x3F; - wm_kmrn_writereg(sc, - KUMCTRLSTA_OFFSET_INB_PARAM, val); + 
wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, + &kmreg); + kmreg |= 0x3F; + wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, + kmreg); break; default: break; } if (sc->sc_type == WM_T_80003) { - val = CSR_READ(sc, WMREG_CTRL_EXT); - val &= ~CTRL_EXT_LINK_MODE_MASK; - CSR_WRITE(sc, WMREG_CTRL_EXT, val); + reg = CSR_READ(sc, WMREG_CTRL_EXT); + reg &= ~CTRL_EXT_LINK_MODE_MASK; + CSR_WRITE(sc, WMREG_CTRL_EXT, reg); /* Bypass RX and TX FIFO's */ wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, @@ -5190,9 +5849,9 @@ wm_init_locked(struct ifnet *ifp) /* Set registers about MSI-X */ if (wm_is_using_msix(sc)) { - uint32_t ivar; + uint32_t ivar, qintr_idx; struct wm_queue *wmq; - int qid, qintr_idx; + unsigned int qid; if (sc->sc_type == WM_T_82575) { /* Interrupt control */ @@ -5217,7 +5876,7 @@ wm_init_locked(struct ifnet *ifp) CSR_WRITE(sc, WMREG_CTRL_EXT, reg); /* - * workaround issue with spurious interrupts + * Workaround issue with spurious interrupts * in MSI-X mode. * At wm_initialize_hardware_bits(), sc_nintrs has not * initialized yet. So re-initialize WMREG_RFCTL here. @@ -5363,15 +6022,6 @@ wm_init_locked(struct ifnet *ifp) } else CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); - if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) - || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) - || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) - || (sc->sc_type == WM_T_PCH_SPT)) { - reg = CSR_READ(sc, WMREG_KABGTXD); - reg |= KABGTXD_BGSQLBIAS; - CSR_WRITE(sc, WMREG_KABGTXD, reg); - } - /* Set up the inter-packet gap. */ CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); @@ -5423,20 +6073,18 @@ wm_init_locked(struct ifnet *ifp) wm_init_manageability(sc); /* - * Set up the receive control register; we actually program - * the register when we set the receive filter. Use multicast - * address offset type 0. + * Set up the receive control register; we actually program the + * register when we set the receive filter. Use multicast address + * offset type 0. 
* - * Only the i82544 has the ability to strip the incoming - * CRC, so we don't enable that feature. + * Only the i82544 has the ability to strip the incoming CRC, so we + * don't enable that feature. */ sc->sc_mchash_type = 0; sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF - | RCTL_MO(sc->sc_mchash_type); + | __SHIFTIN(sc->sc_mchash_type, RCTL_MO); - /* - * 82574 use one buffer extended Rx descriptor. - */ + /* 82574 use one buffer extended Rx descriptor. */ if (sc->sc_type == WM_T_82574) sc->sc_rctl |= RCTL_DTYP_ONEBUF; @@ -5455,9 +6103,9 @@ wm_init_locked(struct ifnet *ifp) CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); } - if (MCLBYTES == 2048) { + if (MCLBYTES == 2048) sc->sc_rctl |= RCTL_2k; - } else { + else { if (sc->sc_type >= WM_T_82543) { switch (MCLBYTES) { case 4096: @@ -5474,12 +6122,10 @@ wm_init_locked(struct ifnet *ifp) MCLBYTES); break; } - } else panic("wm_init: i82542 requires MCLBYTES = 2048"); + } else + panic("wm_init: i82542 requires MCLBYTES = 2048"); } - /* Set the receive filter. */ - wm_set_filter(sc); - /* Enable ECC */ switch (sc->sc_type) { case WM_T_82571: @@ -5489,6 +6135,7 @@ wm_init_locked(struct ifnet *ifp) break; case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: reg = CSR_READ(sc, WMREG_PBECCSTS); reg |= PBECCSTS_UNCORR_ECC_ENABLE; CSR_WRITE(sc, WMREG_PBECCSTS, reg); @@ -5500,6 +6147,14 @@ wm_init_locked(struct ifnet *ifp) break; } + /* + * Set the receive filter. + * + * For 82575 and 82576, the RX descriptors must be initialized after + * the setting of RCTL.EN in wm_set_filter() + */ + wm_set_filter(sc); + /* On 575 and later set RDT only if RX enabled */ if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { int qidx; @@ -5514,7 +6169,7 @@ wm_init_locked(struct ifnet *ifp) } } - wm_turnon(sc); + wm_unset_stopping_flags(sc); /* Start the one second link check clock. 
*/ callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); @@ -5557,7 +6212,7 @@ wm_stop_locked(struct ifnet *ifp, int di device_xname(sc->sc_dev), __func__)); KASSERT(WM_CORE_LOCKED(sc)); - wm_turnoff(sc); + wm_set_stopping_flags(sc); /* Stop the one second clock. */ callout_stop(&sc->sc_tick_ch); @@ -5602,6 +6257,7 @@ wm_stop_locked(struct ifnet *ifp, int di struct wm_queue *wmq = &sc->sc_queue[qidx]; struct wm_txqueue *txq = &wmq->wmq_txq; mutex_enter(txq->txq_lock); + txq->txq_sending = false; /* Ensure watchdog disabled */ for (i = 0; i < WM_TXQUEUELEN(txq); i++) { txs = &txq->txq_soft[i]; if (txs->txs_mbuf != NULL) { @@ -5615,7 +6271,6 @@ wm_stop_locked(struct ifnet *ifp, int di /* Mark the interface as down and cancel the watchdog timer. */ ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); - ifp->if_timer = 0; if (disable) { for (i = 0; i < sc->sc_nqueues; i++) { @@ -5706,7 +6361,7 @@ out: * if the Tx FIFO ring buffer, otherwise the chip will croak. * * We do this by checking the amount of space before the end - * of the Tx FIFO buffer. If the packet will not fit, we "stall" + * of the Tx FIFO buffer. If the packet will not fit, we "stall" * the Tx FIFO, wait for all remaining packets to drain, reset * the internal FIFO pointers to the beginning, and restart * transmission on the interface. 
@@ -6004,9 +6659,7 @@ wm_alloc_txrx_queues(struct wm_softc *sc goto fail_0; } - /* - * For transmission - */ + /* For transmission */ error = 0; tx_done = 0; for (i = 0; i < sc->sc_nqueues; i++) { @@ -6039,16 +6692,15 @@ wm_alloc_txrx_queues(struct wm_softc *sc WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname); WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname); - WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname); WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname); WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname); - - WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname); - WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname); - WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname); - WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname); - WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname); - WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname); for (j = 0; j < WM_NTXSEGS; j++) { snprintf(txq->txq_txseg_evcnt_names[j], @@ -6057,9 +6709,11 @@ wm_alloc_txrx_queues(struct wm_softc *sc NULL, xname, txq->txq_txseg_evcnt_names[j]); } - WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname); - - WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname); #endif /* WM_EVENT_COUNTERS */ tx_done++; @@ -6067,9 +6721,7 @@ wm_alloc_txrx_queues(struct wm_softc *sc if (error) goto fail_1; - /* - * For recieve - */ + /* For receive */ error = 0; 
rx_done = 0; for (i = 0; i < sc->sc_nqueues; i++) { @@ -6093,10 +6745,11 @@ wm_alloc_txrx_queues(struct wm_softc *sc #ifdef WM_EVENT_COUNTERS xname = device_xname(sc->sc_dev); - WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname); + WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname); + WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname); - WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname); - WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname); + WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname); #endif /* WM_EVENT_COUNTERS */ rx_done++; @@ -6104,6 +6757,15 @@ wm_alloc_txrx_queues(struct wm_softc *sc if (error) goto fail_2; + for (i = 0; i < sc->sc_nqueues; i++) { + char rndname[16]; + + snprintf(rndname, sizeof(rndname), "%sTXRX%d", + device_xname(sc->sc_dev), i); + rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname, + RND_TYPE_NET, RND_FLAG_DEFAULT); + } + return 0; fail_2: @@ -6139,13 +6801,17 @@ wm_free_txrx_queues(struct wm_softc *sc) { int i; + for (i = 0; i < sc->sc_nqueues; i++) + rnd_detach_source(&sc->sc_queue[i].rnd_source); + for (i = 0; i < sc->sc_nqueues; i++) { struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; #ifdef WM_EVENT_COUNTERS - WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i); - WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i); - WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i); + WM_Q_EVCNT_DETACH(rxq, intr, rxq, i); + WM_Q_EVCNT_DETACH(rxq, defer, rxq, i); + WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i); + WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i); #endif /* WM_EVENT_COUNTERS */ wm_free_rx_buffer(sc, rxq); @@ -6162,24 +6828,27 @@ wm_free_txrx_queues(struct wm_softc *sc) WM_Q_EVCNT_DETACH(txq, txsstall, txq, i); WM_Q_EVCNT_DETACH(txq, txdstall, txq, i); - WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i); + WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i); WM_Q_EVCNT_DETACH(txq, txdw, txq, i); WM_Q_EVCNT_DETACH(txq, txqe, txq, i); - WM_Q_EVCNT_DETACH(txq, txipsum, txq, i); - WM_Q_EVCNT_DETACH(txq, txtusum, txq, i); - 
WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i); - WM_Q_EVCNT_DETACH(txq, txtso, txq, i); - WM_Q_EVCNT_DETACH(txq, txtso6, txq, i); - WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i); + WM_Q_EVCNT_DETACH(txq, ipsum, txq, i); + WM_Q_EVCNT_DETACH(txq, tusum, txq, i); + WM_Q_EVCNT_DETACH(txq, tusum6, txq, i); + WM_Q_EVCNT_DETACH(txq, tso, txq, i); + WM_Q_EVCNT_DETACH(txq, tso6, txq, i); + WM_Q_EVCNT_DETACH(txq, tsopain, txq, i); for (j = 0; j < WM_NTXSEGS; j++) evcnt_detach(&txq->txq_ev_txseg[j]); - WM_Q_EVCNT_DETACH(txq, txdrop, txq, i); - WM_Q_EVCNT_DETACH(txq, tu, txq, i); + WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i); + WM_Q_EVCNT_DETACH(txq, descdrop, txq, i); + WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i); + WM_Q_EVCNT_DETACH(txq, defrag, txq, i); + WM_Q_EVCNT_DETACH(txq, underrun, txq, i); #endif /* WM_EVENT_COUNTERS */ - /* drain txq_interq */ + /* Drain txq_interq */ while ((m = pcq_get(txq->txq_interq)) != NULL) m_freem(m); pcq_destroy(txq->txq_interq); @@ -6243,7 +6912,7 @@ wm_init_tx_regs(struct wm_softc *sc, str /* XXX should update with AIM? 
*/ CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4); if (sc->sc_type >= WM_T_82540) { - /* should be same */ + /* Should be the same */ CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4); } @@ -6288,6 +6957,9 @@ wm_init_tx_queue(struct wm_softc *sc, st wm_init_tx_descs(sc, txq); wm_init_tx_regs(sc, wmq, txq); wm_init_tx_buffer(sc, txq); + + txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */ + txq->txq_sending = false; } static void @@ -6321,7 +6993,8 @@ wm_init_rx_regs(struct wm_softc *sc, str CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0)); CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0)); - CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc); + CSR_WRITE(sc, WMREG_RDLEN(qid), + rxq->rxq_descsize * rxq->rxq_ndesc); if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) @@ -6339,7 +7012,8 @@ wm_init_rx_regs(struct wm_softc *sc, str CSR_WRITE(sc, WMREG_RDH(qid), 0); CSR_WRITE(sc, WMREG_RDT(qid), 0); /* XXX should update with AIM? */ - CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD); + CSR_WRITE(sc, WMREG_RDTR, + (wmq->wmq_itr / 4) | RDTR_FPD); /* MUST be same */ CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4); CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) | @@ -6371,13 +7045,13 @@ wm_init_rx_buffer(struct wm_softc *sc, s return ENOMEM; } } else { - if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) - wm_init_rxdesc(rxq, i); /* - * For 82575 and newer device, the RX descriptors - * must be initialized after the setting of RCTL.EN in + * For 82575 and 82576, the RX descriptors must be + * initialized after the setting of RCTL.EN in * wm_set_filter() */ + if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) + wm_init_rxdesc(rxq, i); } } rxq->rxq_ptr = 0; @@ -6486,9 +7160,7 @@ wm_tx_offload(struct wm_softc *sc, struc break; default: - /* - * Don't support this protocol or encapsulation. - */ + /* Don't support this protocol or encapsulation. 
*/ *fieldsp = 0; *cmdp = 0; return 0; @@ -6497,9 +7169,9 @@ wm_tx_offload(struct wm_softc *sc, struc if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); - } else { + } else iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); - } + ipcse = offset + iphl - 1; cmd = WTX_CMD_DEXT | WTX_DTYP_D; @@ -6515,12 +7187,12 @@ wm_tx_offload(struct wm_softc *sc, struc (hlen + sizeof(struct tcphdr)))) { /* * TCP/IP headers are not in the first mbuf; we need - * to do this the slow and painful way. Let's just + * to do this the slow and painful way. Let's just * hope this doesn't happen very often. */ struct tcphdr th; - WM_Q_EVCNT_INCR(txq, txtsopain); + WM_Q_EVCNT_INCR(txq, tsopain); m_copydata(m0, hlen, sizeof(th), &th); if (v4) { @@ -6576,10 +7248,10 @@ wm_tx_offload(struct wm_softc *sc, struc } if (v4) { - WM_Q_EVCNT_INCR(txq, txtso); + WM_Q_EVCNT_INCR(txq, tso); cmdlen |= WTX_TCPIP_CMD_IP; } else { - WM_Q_EVCNT_INCR(txq, txtso6); + WM_Q_EVCNT_INCR(txq, tso6); ipcse = 0; } cmd |= WTX_TCPIP_CMD_TSE; @@ -6599,7 +7271,7 @@ wm_tx_offload(struct wm_softc *sc, struc WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | WTX_TCPIP_IPCSE(ipcse); if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) { - WM_Q_EVCNT_INCR(txq, txipsum); + WM_Q_EVCNT_INCR(txq, ipsum); fields |= WTX_IXSM; } @@ -6607,25 +7279,25 @@ wm_tx_offload(struct wm_softc *sc, struc if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) { - WM_Q_EVCNT_INCR(txq, txtusum); + WM_Q_EVCNT_INCR(txq, tusum); fields |= WTX_TXSM; tucs = WTX_TCPIP_TUCSS(offset) | WTX_TCPIP_TUCSO(offset + - M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | - WTX_TCPIP_TUCSE(0) /* rest of packet */; + M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | + WTX_TCPIP_TUCSE(0) /* Rest of packet */; } else if ((m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) { - WM_Q_EVCNT_INCR(txq, txtusum6); + 
WM_Q_EVCNT_INCR(txq, tusum6); fields |= WTX_TXSM; tucs = WTX_TCPIP_TUCSS(offset) | WTX_TCPIP_TUCSO(offset + - M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | - WTX_TCPIP_TUCSE(0) /* rest of packet */; + M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | + WTX_TCPIP_TUCSE(0) /* Rest of packet */; } else { /* Just initialize it to a valid TCP context. */ tucs = WTX_TCPIP_TUCSS(offset) | WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | - WTX_TCPIP_TUCSE(0) /* rest of packet */; + WTX_TCPIP_TUCSE(0) /* Rest of packet */; } /* @@ -6664,7 +7336,7 @@ wm_select_txqueue(struct ifnet *ifp, str * TODO: * distribute by flowid(RSS has value). */ - return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues; + return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues; } /* @@ -6679,7 +7351,7 @@ wm_start(struct ifnet *ifp) struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; #ifdef WM_MPSAFE - KASSERT(ifp->if_extflags & IFEF_START_MPSAFE); + KASSERT(if_is_mpsafe(ifp)); #endif /* * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c. @@ -6712,13 +7384,11 @@ wm_transmit(struct ifnet *ifp, struct mb if (__predict_false(!pcq_put(txq->txq_interq, m))) { m_freem(m); - WM_Q_EVCNT_INCR(txq, txdrop); + WM_Q_EVCNT_INCR(txq, pcqdrop); return ENOBUFS; } - /* - * XXXX NOMPSAFE: ifp->if_data should be percpu. - */ + /* XXX NOMPSAFE: ifp->if_data should be percpu. 
*/ ifp->if_obytes += m->m_pkthdr.len; if (m->m_flags & M_MCAST) ifp->if_omcasts++; @@ -6745,7 +7415,6 @@ wm_send_common_locked(struct ifnet *ifp, { struct wm_softc *sc = ifp->if_softc; struct mbuf *m0; - struct m_tag *mtag; struct wm_txsoft *txs; bus_dmamap_t dmamap; int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso; @@ -6753,6 +7422,7 @@ wm_send_common_locked(struct ifnet *ifp, bus_size_t seglen, curlen; uint32_t cksumcmd; uint8_t cksumfields; + bool remap = true; KASSERT(mutex_owned(txq->txq_lock)); @@ -6776,7 +7446,7 @@ wm_send_common_locked(struct ifnet *ifp, /* Get a work queue entry. */ if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { - wm_txeof(sc, txq); + wm_txeof(txq, UINT_MAX); if (txq->txq_sfree == 0) { DPRINTF(WM_DEBUG_TX, ("%s: TX: no free job descriptors\n", @@ -6796,7 +7466,7 @@ wm_send_common_locked(struct ifnet *ifp, DPRINTF(WM_DEBUG_TX, ("%s: TX: have packet to transmit: %p\n", - device_xname(sc->sc_dev), m0)); + device_xname(sc->sc_dev), m0)); txs = &txq->txq_soft[txq->txq_snext]; dmamap = txs->txs_dmamap; @@ -6808,7 +7478,7 @@ wm_send_common_locked(struct ifnet *ifp, * So says the Linux driver: * The controller does a simple calculation to make sure * there is enough room in the FIFO before initiating the - * DMA for each buffer. The calc is: + * DMA for each buffer. The calc is: * 4 = ceil(buffer len / MSS) * To make sure we don't overrun the FIFO, adjust the max * buffer len if the MSS drops. @@ -6826,11 +7496,23 @@ wm_send_common_locked(struct ifnet *ifp, * since we can't sanely copy a jumbo packet to a single * buffer. 
*/ +retry: error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, BUS_DMA_WRITE | BUS_DMA_NOWAIT); - if (error) { + if (__predict_false(error)) { if (error == EFBIG) { - WM_Q_EVCNT_INCR(txq, txdrop); + if (remap == true) { + struct mbuf *m; + + remap = false; + m = m_defrag(m0, M_NOWAIT); + if (m != NULL) { + WM_Q_EVCNT_INCR(txq, defrag); + m0 = m; + goto retry; + } + } + WM_Q_EVCNT_INCR(txq, toomanyseg); log(LOG_ERR, "%s: Tx packet consumes too many " "DMA segments, dropping...\n", device_xname(sc->sc_dev)); @@ -6838,10 +7520,10 @@ wm_send_common_locked(struct ifnet *ifp, m_freem(m0); continue; } - /* Short on resources, just stop for now. */ + /* Short on resources, just stop for now. */ DPRINTF(WM_DEBUG_TX, ("%s: TX: dmamap load failed: %d\n", - device_xname(sc->sc_dev), error)); + device_xname(sc->sc_dev), error)); break; } @@ -6853,7 +7535,7 @@ wm_send_common_locked(struct ifnet *ifp, /* * Ensure we have enough descriptors free to describe - * the packet. Note, we always reserve one descriptor + * the packet. Note, we always reserve one descriptor * at the end of the ring due to the semantics of the * TDT register, plus one more in the event we need * to load offload context. @@ -6863,13 +7545,13 @@ wm_send_common_locked(struct ifnet *ifp, * Not enough free descriptors to transmit this * packet. We haven't committed anything yet, * so just unload the DMA map, put the packet - * pack on the queue, and punt. Notify the upper + * pack on the queue, and punt. Notify the upper * layer that there are no more slots left. */ DPRINTF(WM_DEBUG_TX, ("%s: TX: need %d (%d) descriptors, have %d\n", - device_xname(sc->sc_dev), dmamap->dm_nsegs, - segs_needed, txq->txq_free - 1)); + device_xname(sc->sc_dev), dmamap->dm_nsegs, + segs_needed, txq->txq_free - 1)); if (!is_transmit) ifp->if_flags |= IFF_OACTIVE; txq->txq_flags |= WM_TXQ_NO_SPACE; @@ -6879,7 +7561,7 @@ wm_send_common_locked(struct ifnet *ifp, } /* - * Check for 82547 Tx FIFO bug. 
We need to do this + * Check for 82547 Tx FIFO bug. We need to do this * once we know we can transmit the packet, since we * do some internal FIFO space accounting here. */ @@ -6887,12 +7569,12 @@ wm_send_common_locked(struct ifnet *ifp, wm_82547_txfifo_bugchk(sc, m0)) { DPRINTF(WM_DEBUG_TX, ("%s: TX: 82547 Tx FIFO bug detected\n", - device_xname(sc->sc_dev))); + device_xname(sc->sc_dev))); if (!is_transmit) ifp->if_flags |= IFF_OACTIVE; txq->txq_flags |= WM_TXQ_NO_SPACE; bus_dmamap_unload(sc->sc_dmat, dmamap); - WM_Q_EVCNT_INCR(txq, txfifo_stall); + WM_Q_EVCNT_INCR(txq, fifo_stall); break; } @@ -6972,9 +7654,9 @@ wm_send_common_locked(struct ifnet *ifp, DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: low %#" PRIx64 ", " - "len %#04zx\n", - device_xname(sc->sc_dev), nexttx, - (uint64_t)curaddr, curlen)); + "len %#04zx\n", + device_xname(sc->sc_dev), nexttx, + (uint64_t)curaddr, curlen)); } } @@ -6982,7 +7664,7 @@ wm_send_common_locked(struct ifnet *ifp, /* * Set up the command byte on the last descriptor of - * the packet. If we're in the interrupt delay window, + * the packet. If we're in the interrupt delay window, * delay the interrupt. */ txq->txq_descs[lasttx].wtx_cmdlen |= @@ -6994,19 +7676,19 @@ wm_send_common_locked(struct ifnet *ifp, * * This is only valid on the last descriptor of the packet. */ - if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { + if (vlan_has_tag(m0)) { txq->txq_descs[lasttx].wtx_cmdlen |= htole32(WTX_CMD_VLE); txq->txq_descs[lasttx].wtx_fields.wtxu_vlan - = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); + = htole16(vlan_get_tag(m0)); } txs->txs_lastdesc = lasttx; DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n", - device_xname(sc->sc_dev), - lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); + device_xname(sc->sc_dev), + lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); /* Sync the descriptors we're using. 
*/ wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, @@ -7020,7 +7702,7 @@ wm_send_common_locked(struct ifnet *ifp, DPRINTF(WM_DEBUG_TX, ("%s: TX: finished transmitting packet, job %d\n", - device_xname(sc->sc_dev), txq->txq_snext)); + device_xname(sc->sc_dev), txq->txq_snext)); /* Advance the tx pointer. */ txq->txq_free -= txs->txs_ndesc; @@ -7037,7 +7719,7 @@ wm_send_common_locked(struct ifnet *ifp, if (!is_transmit) ifp->if_flags |= IFF_OACTIVE; txq->txq_flags |= WM_TXQ_NO_SPACE; - WM_Q_EVCNT_INCR(txq, txdrop); + WM_Q_EVCNT_INCR(txq, descdrop); DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__)); m_freem(m0); @@ -7052,7 +7734,8 @@ wm_send_common_locked(struct ifnet *ifp, if (txq->txq_free != ofree) { /* Set a watchdog timer in case the chip flakes out. */ - ifp->if_timer = 5; + txq->txq_lastsent = time_uptime; + txq->txq_sending = true; } } @@ -7067,7 +7750,6 @@ wm_nq_tx_offload(struct wm_softc *sc, st struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum) { struct mbuf *m0 = txs->txs_mbuf; - struct m_tag *mtag; uint32_t vl_len, mssidx, cmdc; struct ether_header *eh; int offset, iphl; @@ -7111,9 +7793,9 @@ wm_nq_tx_offload(struct wm_softc *sc, st vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT); KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0); - if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { - vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK) - << NQTXC_VLLEN_VLAN_SHIFT); + if (vlan_has_tag(m0)) { + vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK) + << NQTXC_VLLEN_VLAN_SHIFT); *cmdlenp |= NQTX_CMD_VLE; } @@ -7128,12 +7810,12 @@ wm_nq_tx_offload(struct wm_softc *sc, st (hlen + sizeof(struct tcphdr)))) { /* * TCP/IP headers are not in the first mbuf; we need - * to do this the slow and painful way. Let's just + * to do this the slow and painful way. Let's just * hope this doesn't happen very often. 
*/ struct tcphdr th; - WM_Q_EVCNT_INCR(txq, txtsopain); + WM_Q_EVCNT_INCR(txq, tsopain); m_copydata(m0, hlen, sizeof(th), &th); if (v4) { @@ -7191,10 +7873,10 @@ wm_nq_tx_offload(struct wm_softc *sc, st *cmdlenp |= NQTX_CMD_TSE; if (v4) { - WM_Q_EVCNT_INCR(txq, txtso); + WM_Q_EVCNT_INCR(txq, tso); *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM; } else { - WM_Q_EVCNT_INCR(txq, txtso6); + WM_Q_EVCNT_INCR(txq, tso6); *fieldsp |= NQTXD_FIELDS_TUXSM; } *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT); @@ -7215,23 +7897,23 @@ wm_nq_tx_offload(struct wm_softc *sc, st if (m0->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) { - WM_Q_EVCNT_INCR(txq, txtusum); - if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) { + WM_Q_EVCNT_INCR(txq, tusum); + if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) cmdc |= NQTXC_CMD_TCP; - } else { + else cmdc |= NQTXC_CMD_UDP; - } + cmdc |= NQTXC_CMD_IP4; *fieldsp |= NQTXD_FIELDS_TUXSM; } if (m0->m_pkthdr.csum_flags & (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) { - WM_Q_EVCNT_INCR(txq, txtusum6); - if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) { + WM_Q_EVCNT_INCR(txq, tusum6); + if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) cmdc |= NQTXC_CMD_TCP; - } else { + else cmdc |= NQTXC_CMD_UDP; - } + cmdc |= NQTXC_CMD_IP6; *fieldsp |= NQTXD_FIELDS_TUXSM; } @@ -7255,7 +7937,7 @@ wm_nq_tx_offload(struct wm_softc *sc, st wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); DPRINTF(WM_DEBUG_TX, ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev), - txq->txq_next, 0, vl_len)); + txq->txq_next, 0, vl_len)); DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc)); txq->txq_next = WM_NEXTTX(txq, txq->txq_next); txs->txs_ndesc++; @@ -7274,7 +7956,7 @@ wm_nq_start(struct ifnet *ifp) struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; #ifdef WM_MPSAFE - KASSERT(ifp->if_extflags & IFEF_START_MPSAFE); + KASSERT(if_is_mpsafe(ifp)); #endif /* * 
ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c. @@ -7307,13 +7989,11 @@ wm_nq_transmit(struct ifnet *ifp, struct if (__predict_false(!pcq_put(txq->txq_interq, m))) { m_freem(m); - WM_Q_EVCNT_INCR(txq, txdrop); + WM_Q_EVCNT_INCR(txq, pcqdrop); return ENOBUFS; } - /* - * XXXX NOMPSAFE: ifp->if_data should be percpu. - */ + /* XXX NOMPSAFE: ifp->if_data should be percpu. */ ifp->if_obytes += m->m_pkthdr.len; if (m->m_flags & M_MCAST) ifp->if_omcasts++; @@ -7325,8 +8005,9 @@ wm_nq_transmit(struct ifnet *ifp, struct * (2) contention with deferred if_start softint(wm_handle_queue()) * In the case of (1), the last packet enqueued to txq->txq_interq is * dequeued by wm_deferred_start_locked(). So, it does not get stuck. - * In the case of (2), the last packet enqueued to txq->txq_interq is also - * dequeued by wm_deferred_start_locked(). So, it does not get stuck, either. + * In the case of (2), the last packet enqueued to txq->txq_interq is + * also dequeued by wm_deferred_start_locked(). So, it does not get + * stuck, either. */ if (mutex_tryenter(txq->txq_lock)) { if (!txq->txq_stopping) @@ -7350,11 +8031,11 @@ wm_nq_send_common_locked(struct ifnet *i { struct wm_softc *sc = ifp->if_softc; struct mbuf *m0; - struct m_tag *mtag; struct wm_txsoft *txs; bus_dmamap_t dmamap; int error, nexttx, lasttx = -1, seg, segs_needed; bool do_csum, sent; + bool remap = true; KASSERT(mutex_owned(txq->txq_lock)); @@ -7377,7 +8058,7 @@ wm_nq_send_common_locked(struct ifnet *i /* Get a work queue entry. */ if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { - wm_txeof(sc, txq); + wm_txeof(txq, UINT_MAX); if (txq->txq_sfree == 0) { DPRINTF(WM_DEBUG_TX, ("%s: TX: no free job descriptors\n", @@ -7410,11 +8091,23 @@ wm_nq_send_common_locked(struct ifnet *i * since we can't sanely copy a jumbo packet to a single * buffer. 
*/ +retry: error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, BUS_DMA_WRITE | BUS_DMA_NOWAIT); - if (error) { + if (__predict_false(error)) { if (error == EFBIG) { - WM_Q_EVCNT_INCR(txq, txdrop); + if (remap == true) { + struct mbuf *m; + + remap = false; + m = m_defrag(m0, M_NOWAIT); + if (m != NULL) { + WM_Q_EVCNT_INCR(txq, defrag); + m0 = m; + goto retry; + } + } + WM_Q_EVCNT_INCR(txq, toomanyseg); log(LOG_ERR, "%s: Tx packet consumes too many " "DMA segments, dropping...\n", device_xname(sc->sc_dev)); @@ -7425,7 +8118,7 @@ wm_nq_send_common_locked(struct ifnet *i /* Short on resources, just stop for now. */ DPRINTF(WM_DEBUG_TX, ("%s: TX: dmamap load failed: %d\n", - device_xname(sc->sc_dev), error)); + device_xname(sc->sc_dev), error)); break; } @@ -7433,7 +8126,7 @@ wm_nq_send_common_locked(struct ifnet *i /* * Ensure we have enough descriptors free to describe - * the packet. Note, we always reserve one descriptor + * the packet. Note, we always reserve one descriptor * at the end of the ring due to the semantics of the * TDT register, plus one more in the event we need * to load offload context. @@ -7443,13 +8136,13 @@ wm_nq_send_common_locked(struct ifnet *i * Not enough free descriptors to transmit this * packet. We haven't committed anything yet, * so just unload the DMA map, put the packet - * pack on the queue, and punt. Notify the upper + * pack on the queue, and punt. Notify the upper * layer that there are no more slots left. */ DPRINTF(WM_DEBUG_TX, ("%s: TX: need %d (%d) descriptors, have %d\n", - device_xname(sc->sc_dev), dmamap->dm_nsegs, - segs_needed, txq->txq_free - 1)); + device_xname(sc->sc_dev), dmamap->dm_nsegs, + segs_needed, txq->txq_free - 1)); if (!is_transmit) ifp->if_flags |= IFF_OACTIVE; txq->txq_flags |= WM_TXQ_NO_SPACE; @@ -7481,7 +8174,7 @@ wm_nq_send_common_locked(struct ifnet *i /* Set up offload parameters for this packet. 
*/ uint32_t cmdlen, fields, dcmdlen; - if (m0->m_pkthdr.csum_flags & + if (m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { @@ -7504,25 +8197,24 @@ wm_nq_send_common_locked(struct ifnet *i /* Initialize the first transmit descriptor. */ nexttx = txq->txq_next; if (!do_csum) { - /* setup a legacy descriptor */ + /* Setup a legacy descriptor */ wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr, dmamap->dm_segs[0].ds_addr); txq->txq_descs[nexttx].wtx_cmdlen = htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0; txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0; - if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != - NULL) { + if (vlan_has_tag(m0)) { txq->txq_descs[nexttx].wtx_cmdlen |= htole32(WTX_CMD_VLE); txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = - htole16(VLAN_TAG_VALUE(mtag) & 0xffff); - } else { + htole16(vlan_get_tag(m0)); + } else txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0; - } + dcmdlen = 0; } else { - /* setup an advanced data descriptor */ + /* Setup an advanced data descriptor */ txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = htole64(dmamap->dm_segs[0].ds_addr); KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); @@ -7532,22 +8224,22 @@ wm_nq_send_common_locked(struct ifnet *i htole32(fields); DPRINTF(WM_DEBUG_TX, ("%s: TX: adv data desc %d 0x%" PRIx64 "\n", - device_xname(sc->sc_dev), nexttx, - (uint64_t)dmamap->dm_segs[0].ds_addr)); + device_xname(sc->sc_dev), nexttx, + (uint64_t)dmamap->dm_segs[0].ds_addr)); DPRINTF(WM_DEBUG_TX, ("\t 0x%08x%08x\n", fields, - (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); + (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; } lasttx = nexttx; nexttx = WM_NEXTTX(txq, nexttx); /* - * fill in the next descriptors. legacy or adcanced format + * Fill in the next descriptors. 
legacy or advanced format * is the same here */ for (seg = 1; seg < dmamap->dm_nsegs; - seg++, nexttx = WM_NEXTTX(txq, nexttx)) { + seg++, nexttx = WM_NEXTTX(txq, nexttx)) { txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = htole64(dmamap->dm_segs[seg].ds_addr); txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = @@ -7557,18 +8249,17 @@ wm_nq_send_common_locked(struct ifnet *i lasttx = nexttx; DPRINTF(WM_DEBUG_TX, - ("%s: TX: desc %d: %#" PRIx64 ", " - "len %#04zx\n", - device_xname(sc->sc_dev), nexttx, - (uint64_t)dmamap->dm_segs[seg].ds_addr, - dmamap->dm_segs[seg].ds_len)); + ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n", + device_xname(sc->sc_dev), nexttx, + (uint64_t)dmamap->dm_segs[seg].ds_addr, + dmamap->dm_segs[seg].ds_len)); } KASSERT(lasttx != -1); /* * Set up the command byte on the last descriptor of - * the packet. If we're in the interrupt delay window, + * the packet. If we're in the interrupt delay window, * delay the interrupt. */ KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == @@ -7595,7 +8286,7 @@ wm_nq_send_common_locked(struct ifnet *i DPRINTF(WM_DEBUG_TX, ("%s: TX: finished transmitting packet, job %d\n", - device_xname(sc->sc_dev), txq->txq_snext)); + device_xname(sc->sc_dev), txq->txq_snext)); /* Advance the tx pointer. */ txq->txq_free -= txs->txs_ndesc; @@ -7612,7 +8303,7 @@ wm_nq_send_common_locked(struct ifnet *i if (!is_transmit) ifp->if_flags |= IFF_OACTIVE; txq->txq_flags |= WM_TXQ_NO_SPACE; - WM_Q_EVCNT_INCR(txq, txdrop); + WM_Q_EVCNT_INCR(txq, descdrop); DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__)); m_freem(m0); @@ -7627,7 +8318,8 @@ wm_nq_send_common_locked(struct ifnet *i if (sent) { /* Set a watchdog timer in case the chip flakes out. */ - ifp->if_timer = 5; + txq->txq_lastsent = time_uptime; + txq->txq_sending = true; } } @@ -7666,24 +8358,25 @@ wm_deferred_start_locked(struct wm_txque * * Helper; handle transmit interrupts. 
*/ -static int -wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq) +static bool +wm_txeof(struct wm_txqueue *txq, u_int limit) { + struct wm_softc *sc = txq->txq_sc; struct ifnet *ifp = &sc->sc_ethercom.ec_if; struct wm_txsoft *txs; - bool processed = false; int count = 0; int i; uint8_t status; struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq); + bool more = false; KASSERT(mutex_owned(txq->txq_lock)); if (txq->txq_stopping) - return 0; + return false; txq->txq_flags &= ~WM_TXQ_NO_SPACE; - /* for ALTQ and legacy(not use multiqueue) ethernet controller */ + /* For ALTQ and legacy(not use multiqueue) ethernet controller */ if (wmq->wmq_id == 0) ifp->if_flags &= ~IFF_OACTIVE; @@ -7693,6 +8386,14 @@ wm_txeof(struct wm_softc *sc, struct wm_ */ for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq); i = WM_NEXTTXS(txq, i), txq->txq_sfree++) { + if (limit-- == 0) { + more = true; + DPRINTF(WM_DEBUG_TX, + ("%s: TX: loop limited, job %d is not processed\n", + device_xname(sc->sc_dev), i)); + break; + } + txs = &txq->txq_soft[i]; DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n", @@ -7709,7 +8410,6 @@ wm_txeof(struct wm_softc *sc, struct wm_ break; } - processed = true; count++; DPRINTF(WM_DEBUG_TX, ("%s: TX: job %d done: descs %d..%d\n", @@ -7724,16 +8424,30 @@ wm_txeof(struct wm_softc *sc, struct wm_ #ifdef WM_EVENT_COUNTERS if (status & WTX_ST_TU) - WM_Q_EVCNT_INCR(txq, tu); + WM_Q_EVCNT_INCR(txq, underrun); #endif /* WM_EVENT_COUNTERS */ - if (status & (WTX_ST_EC | WTX_ST_LC)) { + /* + * 82574 and newer's document says the status field has neither + * EC (Excessive Collision) bit nor LC (Late Collision) bit + * (reserved). Refer "PCIe GbE Controller Open Source Software + * Developer's Manual", 82574 datasheet and newer. + * + * XXX I saw the LC bit was set on I218 even though the media + * was full duplex, so the bit might be used for other + * meaning ...(I have no document). 
+ */ + + if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0) + && ((sc->sc_type < WM_T_82574) + || (sc->sc_type == WM_T_80003))) { ifp->if_oerrors++; if (status & WTX_ST_LC) log(LOG_WARNING, "%s: late collision\n", device_xname(sc->sc_dev)); else if (status & WTX_ST_EC) { - ifp->if_collisions += 16; + ifp->if_collisions += + TX_COLLISION_THRESHOLD + 1; log(LOG_WARNING, "%s: excessive collisions\n", device_xname(sc->sc_dev)); } @@ -7756,17 +8470,14 @@ wm_txeof(struct wm_softc *sc, struct wm_ DPRINTF(WM_DEBUG_TX, ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i)); - if (count != 0) - rnd_add_uint32(&sc->rnd_source, count); - /* * If there are no more pending transmissions, cancel the watchdog * timer. */ if (txq->txq_sfree == WM_TXQUEUELEN(txq)) - ifp->if_timer = 0; + txq->txq_sending = false; - return processed; + return more; } static inline uint32_t @@ -7891,18 +8602,22 @@ wm_rxdesc_has_errors(struct wm_rxqueue * { struct wm_softc *sc = rxq->rxq_sc; - /* XXXX missing error bit for newqueue? */ + /* XXX missing error bit for newqueue? 
*/ if (wm_rxdesc_is_set_error(sc, errors, - WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE, - EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE, + WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE, + EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ + | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE, NQRXC_ERROR_RXE)) { - if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0)) + if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, + EXTRXC_ERROR_SE, 0)) log(LOG_WARNING, "%s: symbol error\n", device_xname(sc->sc_dev)); - else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0)) + else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, + EXTRXC_ERROR_SEQ, 0)) log(LOG_WARNING, "%s: receive sequence error\n", device_xname(sc->sc_dev)); - else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0)) + else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, + EXTRXC_ERROR_CE, 0)) log(LOG_WARNING, "%s: CRC error\n", device_xname(sc->sc_dev)); return true; @@ -7927,14 +8642,13 @@ wm_rxdesc_dd(struct wm_rxqueue *rxq, int } static inline bool -wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag, - struct mbuf *m) +wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, + uint16_t vlantag, struct mbuf *m) { - struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if; if (wm_rxdesc_is_set_status(rxq->rxq_sc, status, WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) { - VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false); + vlan_set_tag(m, le16toh(vlantag)); } return true; @@ -7949,12 +8663,11 @@ wm_rxdesc_ensure_checksum(struct wm_rxqu if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) { if (wm_rxdesc_is_set_status(sc, status, WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) { - WM_Q_EVCNT_INCR(rxq, rxipsum); + WM_Q_EVCNT_INCR(rxq, ipsum); m->m_pkthdr.csum_flags |= M_CSUM_IPv4; if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_IPE, EXTRXC_ERROR_IPE, 
NQRXC_ERROR_IPE)) - m->m_pkthdr.csum_flags |= - M_CSUM_IPv4_BAD; + m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; } if (wm_rxdesc_is_set_status(sc, status, WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) { @@ -7963,14 +8676,13 @@ wm_rxdesc_ensure_checksum(struct wm_rxqu * so we just set both bits, and expect the * upper layers to deal. */ - WM_Q_EVCNT_INCR(rxq, rxtusum); + WM_Q_EVCNT_INCR(rxq, tusum); m->m_pkthdr.csum_flags |= - M_CSUM_TCPv4 | M_CSUM_UDPv4 | - M_CSUM_TCPv6 | M_CSUM_UDPv6; - if (wm_rxdesc_is_set_error(sc, errors, - WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E)) - m->m_pkthdr.csum_flags |= - M_CSUM_TCP_UDP_BAD; + M_CSUM_TCPv4 | M_CSUM_UDPv4 | + M_CSUM_TCPv6 | M_CSUM_UDPv6; + if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE, + EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E)) + m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; } } } @@ -7980,7 +8692,7 @@ wm_rxdesc_ensure_checksum(struct wm_rxqu * * Helper; handle receive interrupts. */ -static void +static bool wm_rxeof(struct wm_rxqueue *rxq, u_int limit) { struct wm_softc *sc = rxq->rxq_sc; @@ -7991,12 +8703,17 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l int count = 0; uint32_t status, errors; uint16_t vlantag; + bool more = false; KASSERT(mutex_owned(rxq->rxq_lock)); for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) { if (limit-- == 0) { rxq->rxq_ptr = i; + more = true; + DPRINTF(WM_DEBUG_RX, + ("%s: RX: loop limited, descriptor %d is not processed\n", + device_xname(sc->sc_dev), i)); break; } @@ -8004,8 +8721,9 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l DPRINTF(WM_DEBUG_RX, ("%s: RX: checking descriptor %d\n", - device_xname(sc->sc_dev), i)); - wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); + device_xname(sc->sc_dev), i)); + wm_cdrxsync(rxq, i, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); status = wm_rxdesc_get_status(rxq, i); errors = wm_rxdesc_get_errors(rxq, i); @@ -8029,13 +8747,13 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l if (__predict_false(rxq->rxq_discard)) { DPRINTF(WM_DEBUG_RX, 
("%s: RX: discarding contents of descriptor %d\n", - device_xname(sc->sc_dev), i)); + device_xname(sc->sc_dev), i)); wm_init_rxdesc(rxq, i); if (wm_rxdesc_is_eop(rxq, status)) { /* Reset our state. */ DPRINTF(WM_DEBUG_RX, ("%s: RX: resetting rxdiscard -> 0\n", - device_xname(sc->sc_dev))); + device_xname(sc->sc_dev))); rxq->rxq_discard = 0; } continue; @@ -8068,7 +8786,7 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l DPRINTF(WM_DEBUG_RX, ("%s: RX: Rx buffer allocation failed, " "dropping packet%s\n", device_xname(sc->sc_dev), - rxq->rxq_discard ? " (discard)" : "")); + rxq->rxq_discard ? " (discard)" : "")); continue; } @@ -8076,19 +8794,19 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l rxq->rxq_len += len; DPRINTF(WM_DEBUG_RX, ("%s: RX: buffer at %p len %d\n", - device_xname(sc->sc_dev), m->m_data, len)); + device_xname(sc->sc_dev), m->m_data, len)); /* If this is not the end of the packet, keep looking. */ if (!wm_rxdesc_is_eop(rxq, status)) { WM_RXCHAIN_LINK(rxq, m); DPRINTF(WM_DEBUG_RX, ("%s: RX: not yet EOP, rxlen -> %d\n", - device_xname(sc->sc_dev), rxq->rxq_len)); + device_xname(sc->sc_dev), rxq->rxq_len)); continue; } /* - * Okay, we have the entire packet now. The chip is + * Okay, we have the entire packet now. The chip is * configured to include the FCS except I350 and I21[01] * (not all chips can be configured to strip it), * so we need to trim it. @@ -8119,7 +8837,7 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l DPRINTF(WM_DEBUG_RX, ("%s: RX: have entire packet, len -> %d\n", - device_xname(sc->sc_dev), len)); + device_xname(sc->sc_dev), len)); /* If an error occurred, update stats and drop the packet. 
*/ if (wm_rxdesc_has_errors(rxq, errors)) { @@ -8165,11 +8883,10 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l break; } - if (count != 0) - rnd_add_uint32(&sc->rnd_source, count); - DPRINTF(WM_DEBUG_RX, ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i)); + + return more; } /* @@ -8180,123 +8897,136 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l static void wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) { + uint32_t status, reg; + bool link; KASSERT(WM_CORE_LOCKED(sc)); DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), __func__)); - if (icr & ICR_LSC) { - uint32_t reg; - uint32_t status = CSR_READ(sc, WMREG_STATUS); - - if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0)) - wm_gig_downshift_workaround_ich8lan(sc); - - DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n", + if ((icr & ICR_LSC) == 0) { + if (icr & ICR_RXSEQ) + DPRINTF(WM_DEBUG_LINK, + ("%s: LINK Receive sequence error\n", + device_xname(sc->sc_dev))); + return; + } + /* Link status changed */ + status = CSR_READ(sc, WMREG_STATUS); + link = status & STATUS_LU; + if (link) { + DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", + device_xname(sc->sc_dev), + (status & STATUS_FD) ? "FDX" : "HDX")); + } else { + DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", device_xname(sc->sc_dev))); - mii_pollstat(&sc->sc_mii); - if (sc->sc_type == WM_T_82543) { - int miistatus, active; + } + if ((sc->sc_type == WM_T_ICH8) && (link == false)) + wm_gig_downshift_workaround_ich8lan(sc); - /* - * With 82543, we need to force speed and - * duplex on the MAC equal to what the PHY - * speed and duplex configuration is. 
- */ - miistatus = sc->sc_mii.mii_media_status; + if ((sc->sc_type == WM_T_ICH8) + && (sc->sc_phytype == WMPHY_IGP_3)) { + wm_kmrn_lock_loss_workaround_ich8lan(sc); + } + DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n", + device_xname(sc->sc_dev))); + mii_pollstat(&sc->sc_mii); + if (sc->sc_type == WM_T_82543) { + int miistatus, active; - if (miistatus & IFM_ACTIVE) { - active = sc->sc_mii.mii_media_active; - sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); - switch (IFM_SUBTYPE(active)) { - case IFM_10_T: - sc->sc_ctrl |= CTRL_SPEED_10; - break; - case IFM_100_TX: - sc->sc_ctrl |= CTRL_SPEED_100; - break; - case IFM_1000_T: - sc->sc_ctrl |= CTRL_SPEED_1000; - break; - default: - /* - * fiber? - * Shoud not enter here. - */ - printf("unknown media (%x)\n", active); - break; - } - if (active & IFM_FDX) - sc->sc_ctrl |= CTRL_FD; - CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); - } - } else if ((sc->sc_type == WM_T_ICH8) - && (sc->sc_phytype == WMPHY_IGP_3)) { - wm_kmrn_lock_loss_workaround_ich8lan(sc); - } else if (sc->sc_type == WM_T_PCH) { - wm_k1_gig_workaround_hv(sc, - ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); - } - - if ((sc->sc_phytype == WMPHY_82578) - && (IFM_SUBTYPE(sc->sc_mii.mii_media_active) - == IFM_1000_T)) { - - if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) { - delay(200*1000); /* XXX too big */ - - /* Link stall fix for link up */ - wm_gmii_hv_writereg(sc->sc_dev, 1, - HV_MUX_DATA_CTRL, - HV_MUX_DATA_CTRL_GEN_TO_MAC - | HV_MUX_DATA_CTRL_FORCE_SPEED); - wm_gmii_hv_writereg(sc->sc_dev, 1, - HV_MUX_DATA_CTRL, - HV_MUX_DATA_CTRL_GEN_TO_MAC); + /* + * With 82543, we need to force speed and + * duplex on the MAC equal to what the PHY + * speed and duplex configuration is. 
+ */ + miistatus = sc->sc_mii.mii_media_status; + + if (miistatus & IFM_ACTIVE) { + active = sc->sc_mii.mii_media_active; + sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); + switch (IFM_SUBTYPE(active)) { + case IFM_10_T: + sc->sc_ctrl |= CTRL_SPEED_10; + break; + case IFM_100_TX: + sc->sc_ctrl |= CTRL_SPEED_100; + break; + case IFM_1000_T: + sc->sc_ctrl |= CTRL_SPEED_1000; + break; + default: + /* + * Fiber? + * Shoud not enter here. + */ + device_printf(sc->sc_dev, + "unknown media (%x)\n", active); + break; } + if (active & IFM_FDX) + sc->sc_ctrl |= CTRL_FD; + CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); } + } else if (sc->sc_type == WM_T_PCH) { + wm_k1_gig_workaround_hv(sc, + ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); + } + + /* + * I217 Packet Loss issue: + * ensure that FEXTNVM4 Beacon Duration is set correctly + * on power up. + * Set the Beacon Duration for I217 to 8 usec + */ + if (sc->sc_type >= WM_T_PCH_LPT) { + reg = CSR_READ(sc, WMREG_FEXTNVM4); + reg &= ~FEXTNVM4_BEACON_DURATION; + reg |= FEXTNVM4_BEACON_DURATION_8US; + CSR_WRITE(sc, WMREG_FEXTNVM4, reg); + } + + /* Work-around I218 hang issue */ + if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) || + (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) || + (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) || + (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3)) + wm_k1_workaround_lpt_lp(sc, link); + + if (sc->sc_type >= WM_T_PCH_LPT) { /* - * I217 Packet Loss issue: - * ensure that FEXTNVM4 Beacon Duration is set correctly - * on power up. 
- * Set the Beacon Duration for I217 to 8 usec + * Set platform power management values for Latency + * Tolerance Reporting (LTR) */ - if ((sc->sc_type == WM_T_PCH_LPT) - || (sc->sc_type == WM_T_PCH_SPT)) { - reg = CSR_READ(sc, WMREG_FEXTNVM4); - reg &= ~FEXTNVM4_BEACON_DURATION; - reg |= FEXTNVM4_BEACON_DURATION_8US; - CSR_WRITE(sc, WMREG_FEXTNVM4, reg); - } + wm_platform_pm_pch_lpt(sc, + ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); + } - /* XXX Work-around I218 hang issue */ - /* e1000_k1_workaround_lpt_lp() */ + /* FEXTNVM6 K1-off workaround */ + if (sc->sc_type == WM_T_PCH_SPT) { + reg = CSR_READ(sc, WMREG_FEXTNVM6); + if (CSR_READ(sc, WMREG_PCIEANACFG) + & FEXTNVM6_K1_OFF_ENABLE) + reg |= FEXTNVM6_K1_OFF_ENABLE; + else + reg &= ~FEXTNVM6_K1_OFF_ENABLE; + CSR_WRITE(sc, WMREG_FEXTNVM6, reg); + } - if ((sc->sc_type == WM_T_PCH_LPT) - || (sc->sc_type == WM_T_PCH_SPT)) { - /* - * Set platform power management values for Latency - * Tolerance Reporting (LTR) - */ - wm_platform_pm_pch_lpt(sc, - ((sc->sc_mii.mii_media_status & IFM_ACTIVE) - != 0)); - } + if (!link) + return; - /* FEXTNVM6 K1-off workaround */ - if (sc->sc_type == WM_T_PCH_SPT) { - reg = CSR_READ(sc, WMREG_FEXTNVM6); - if (CSR_READ(sc, WMREG_PCIEANACFG) - & FEXTNVM6_K1_OFF_ENABLE) - reg |= FEXTNVM6_K1_OFF_ENABLE; - else - reg &= ~FEXTNVM6_K1_OFF_ENABLE; - CSR_WRITE(sc, WMREG_FEXTNVM6, reg); - } - } else if (icr & ICR_RXSEQ) { - DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n", - device_xname(sc->sc_dev))); + switch (sc->sc_type) { + case WM_T_PCH2: + wm_k1_workaround_lv(sc); + /* FALLTHROUGH */ + case WM_T_PCH: + if (sc->sc_phytype == WMPHY_82578) + wm_link_stall_workaround_hv(sc); + break; + default: + break; } } @@ -8316,10 +9046,11 @@ wm_linkintr_tbi(struct wm_softc *sc, uin status = CSR_READ(sc, WMREG_STATUS); if (icr & ICR_LSC) { + wm_check_for_link(sc); if (status & STATUS_LU) { DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", - device_xname(sc->sc_dev), - (status & 
STATUS_FD) ? "FDX" : "HDX")); + device_xname(sc->sc_dev), + (status & STATUS_FD) ? "FDX" : "HDX")); /* * NOTE: CTRL will update TFCE and RFCE automatically, * so we should update sc->sc_ctrl @@ -8338,23 +9069,20 @@ wm_linkintr_tbi(struct wm_softc *sc, uin sc->sc_fcrtl |= FCRTL_XONE; CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? - WMREG_OLD_FCRTL : WMREG_FCRTL, - sc->sc_fcrtl); + WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl); sc->sc_tbi_linkup = 1; if_link_state_change(ifp, LINK_STATE_UP); } else { DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", - device_xname(sc->sc_dev))); + device_xname(sc->sc_dev))); sc->sc_tbi_linkup = 0; if_link_state_change(ifp, LINK_STATE_DOWN); } /* Update LED */ wm_tbi_serdes_set_linkled(sc); - } else if (icr & ICR_RXSEQ) { - DPRINTF(WM_DEBUG_LINK, - ("%s: LINK: Receive sequence error\n", - device_xname(sc->sc_dev))); - } + } else if (icr & ICR_RXSEQ) + DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n", + device_xname(sc->sc_dev))); } /* @@ -8427,11 +9155,9 @@ wm_linkintr_serdes(struct wm_softc *sc, } /* Update LED */ wm_tbi_serdes_set_linkled(sc); - } else { - DPRINTF(WM_DEBUG_LINK, - ("%s: LINK: Receive sequence error\n", + } else + DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n", device_xname(sc->sc_dev))); - } } /* @@ -8448,7 +9174,7 @@ wm_linkintr(struct wm_softc *sc, uint32_ if (sc->sc_flags & WM_F_HAS_MII) wm_linkintr_gmii(sc, icr); else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES) - && (sc->sc_type >= WM_T_82575)) + && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))) wm_linkintr_serdes(sc, icr); else wm_linkintr_tbi(sc, icr); @@ -8469,12 +9195,13 @@ wm_intr_legacy(void *arg) uint32_t icr, rndval = 0; int handled = 0; - DPRINTF(WM_DEBUG_TX, - ("%s: INTx: got intr\n", device_xname(sc->sc_dev))); while (1 /* CONSTCOND */) { icr = CSR_READ(sc, WMREG_ICR); if ((icr & sc->sc_icr) == 0) break; + if (handled == 0) + DPRINTF(WM_DEBUG_TX, + ("%s: INTx: got 
intr\n",device_xname(sc->sc_dev))); if (rndval == 0) rndval = icr; @@ -8491,12 +9218,19 @@ wm_intr_legacy(void *arg) if (icr & (ICR_RXDMT0 | ICR_RXT0)) { DPRINTF(WM_DEBUG_RX, ("%s: RX: got Rx intr 0x%08x\n", - device_xname(sc->sc_dev), - icr & (ICR_RXDMT0 | ICR_RXT0))); - WM_Q_EVCNT_INCR(rxq, rxintr); + device_xname(sc->sc_dev), + icr & (ICR_RXDMT0 | ICR_RXT0))); + WM_Q_EVCNT_INCR(rxq, intr); } #endif + /* + * wm_rxeof() does *not* call upper layer functions directly, + * as if_percpuq_enqueue() just call softint_schedule(). + * So, we can call wm_rxeof() in interrupt context. + */ wm_rxeof(rxq, UINT_MAX); + /* Fill lower bits with RX index. See below for the upper. */ + rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK; mutex_exit(rxq->rxq_lock); mutex_enter(txq->txq_lock); @@ -8510,11 +9244,13 @@ wm_intr_legacy(void *arg) if (icr & ICR_TXDW) { DPRINTF(WM_DEBUG_TX, ("%s: TX: got TXDW interrupt\n", - device_xname(sc->sc_dev))); + device_xname(sc->sc_dev))); WM_Q_EVCNT_INCR(txq, txdw); } #endif - wm_txeof(sc, txq); + wm_txeof(txq, UINT_MAX); + /* Fill upper bits with TX index. See above for the lower. */ + rndval = txq->txq_next * WM_NRXDESC; mutex_exit(txq->txq_lock); WM_CORE_LOCK(sc); @@ -8539,7 +9275,7 @@ wm_intr_legacy(void *arg) } } - rnd_add_uint32(&sc->rnd_source, rndval); + rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval); if (handled) { /* Try to get more packets going. 
*/ @@ -8555,9 +9291,11 @@ wm_txrxintr_disable(struct wm_queue *wmq struct wm_softc *sc = wmq->wmq_txq.txq_sc; if (sc->sc_type == WM_T_82574) - CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id)); + CSR_WRITE(sc, WMREG_IMC, + ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id)); else if (sc->sc_type == WM_T_82575) - CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); + CSR_WRITE(sc, WMREG_EIMC, + EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); else CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx); } @@ -8569,10 +9307,18 @@ wm_txrxintr_enable(struct wm_queue *wmq) wm_itrs_calculate(sc, wmq); + /* + * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here. + * There is no need to care about which of RXQ(0) and RXQ(1) enable + * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled + * while each wm_handle_queue(wmq) is runnig. + */ if (sc->sc_type == WM_T_82574) - CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id)); + CSR_WRITE(sc, WMREG_IMS, + ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER); else if (sc->sc_type == WM_T_82575) - CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); + CSR_WRITE(sc, WMREG_EIMS, + EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); else CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx); } @@ -8584,7 +9330,11 @@ wm_txrxintr_msix(void *arg) struct wm_txqueue *txq = &wmq->wmq_txq; struct wm_rxqueue *rxq = &wmq->wmq_rxq; struct wm_softc *sc = txq->txq_sc; - u_int limit = sc->sc_rx_intr_process_limit; + u_int txlimit = sc->sc_tx_intr_process_limit; + u_int rxlimit = sc->sc_rx_intr_process_limit; + uint32_t rndval = 0; + bool txmore; + bool rxmore; KASSERT(wmq->wmq_intr_idx == wmq->wmq_id); @@ -8601,7 +9351,9 @@ wm_txrxintr_msix(void *arg) } WM_Q_EVCNT_INCR(txq, txdw); - wm_txeof(sc, txq); + txmore = wm_txeof(txq, txlimit); + /* Fill upper bits with TX index. See below for the lower. 
*/ + rndval = txq->txq_next * WM_NRXDESC; /* wm_deferred start() is done in wm_handle_queue(). */ mutex_exit(txq->txq_lock); @@ -8614,13 +9366,26 @@ wm_txrxintr_msix(void *arg) return 0; } - WM_Q_EVCNT_INCR(rxq, rxintr); - wm_rxeof(rxq, limit); + WM_Q_EVCNT_INCR(rxq, intr); + rxmore = wm_rxeof(rxq, rxlimit); + + /* Fill lower bits with RX index. See above for the upper. */ + rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK; mutex_exit(rxq->rxq_lock); wm_itrs_writereg(sc, wmq); - softint_schedule(wmq->wmq_si); + /* + * This function is called in the hardware interrupt context and + * per-CPU, so it's not required to take a lock. + */ + if (rndval != 0) + rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval); + + if (txmore || rxmore) + softint_schedule(wmq->wmq_si); + else + wm_txrxintr_enable(wmq); return 1; } @@ -8632,14 +9397,17 @@ wm_handle_queue(void *arg) struct wm_txqueue *txq = &wmq->wmq_txq; struct wm_rxqueue *rxq = &wmq->wmq_rxq; struct wm_softc *sc = txq->txq_sc; - u_int limit = sc->sc_rx_process_limit; + u_int txlimit = sc->sc_tx_process_limit; + u_int rxlimit = sc->sc_rx_process_limit; + bool txmore; + bool rxmore; mutex_enter(txq->txq_lock); if (txq->txq_stopping) { mutex_exit(txq->txq_lock); return; } - wm_txeof(sc, txq); + txmore = wm_txeof(txq, txlimit); wm_deferred_start_locked(txq); mutex_exit(txq->txq_lock); @@ -8648,12 +9416,15 @@ wm_handle_queue(void *arg) mutex_exit(rxq->rxq_lock); return; } - WM_Q_EVCNT_INCR(rxq, rxintr); - wm_rxeof(rxq, limit); + WM_Q_EVCNT_INCR(rxq, defer); + rxmore = wm_rxeof(rxq, rxlimit); mutex_exit(rxq->rxq_lock); - wm_txrxintr_enable(wmq); -} + if (txmore || rxmore) + softint_schedule(wmq->wmq_si); + else + wm_txrxintr_enable(wmq); +} /* * wm_linkintr_msix: @@ -8665,24 +9436,59 @@ wm_linkintr_msix(void *arg) { struct wm_softc *sc = arg; uint32_t reg; + bool has_rxo; DPRINTF(WM_DEBUG_LINK, ("%s: LINK: got link intr\n", device_xname(sc->sc_dev))); reg = CSR_READ(sc, WMREG_ICR); WM_CORE_LOCK(sc); - if 
((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0)) + if (sc->sc_core_stopping) goto out; - WM_EVCNT_INCR(&sc->sc_ev_linkintr); - wm_linkintr(sc, ICR_LSC); + if ((reg & ICR_LSC) != 0) { + WM_EVCNT_INCR(&sc->sc_ev_linkintr); + wm_linkintr(sc, ICR_LSC); + } + + /* + * XXX 82574 MSI-X mode workaround + * + * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER + * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor + * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1) + * interrupts by writing WMREG_ICS to process receive packets. + */ + if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) { +#if defined(WM_DEBUG) + log(LOG_WARNING, "%s: Receive overrun\n", + device_xname(sc->sc_dev)); +#endif /* defined(WM_DEBUG) */ + + has_rxo = true; + /* + * The RXO interrupt is very high rate when receive traffic is + * high rate. We use polling mode for ICR_OTHER like Tx/Rx + * interrupts. ICR_OTHER will be enabled at the end of + * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and + * ICR_RXQ(1) interrupts. + */ + CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); + + CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1)); + } + + out: WM_CORE_UNLOCK(sc); - - if (sc->sc_type == WM_T_82574) - CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); - else if (sc->sc_type == WM_T_82575) + + if (sc->sc_type == WM_T_82574) { + if (!has_rxo) + CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); + else + CSR_WRITE(sc, WMREG_IMS, ICR_LSC); + } else if (sc->sc_type == WM_T_82575) CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER); else CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx); @@ -8773,7 +9579,7 @@ wm_gmii_reset(struct wm_softc *sc) #endif delay(20*1000); /* XXX extra delay to get PHY ID? 
*/ break; - case WM_T_82544: /* reset 10000us */ + case WM_T_82544: /* Reset 10000us */ case WM_T_82540: case WM_T_82545: case WM_T_82545_3: @@ -8783,7 +9589,7 @@ wm_gmii_reset(struct wm_softc *sc) case WM_T_82541_2: case WM_T_82547: case WM_T_82547_2: - case WM_T_82571: /* reset 100us */ + case WM_T_82571: /* Reset 100us */ case WM_T_82572: case WM_T_82573: case WM_T_82574: @@ -8796,7 +9602,7 @@ wm_gmii_reset(struct wm_softc *sc) case WM_T_I211: case WM_T_82583: case WM_T_80003: - /* generic reset */ + /* Generic reset */ CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); CSR_WRITE_FLUSH(sc); delay(20000); @@ -8808,7 +9614,7 @@ wm_gmii_reset(struct wm_softc *sc) || (sc->sc_type == WM_T_82541_2) || (sc->sc_type == WM_T_82547) || (sc->sc_type == WM_T_82547_2)) { - /* workaround for igp are done in igp_reset() */ + /* Workaround for igp are done in igp_reset() */ /* XXX add code to set LED after phy reset */ } break; @@ -8819,7 +9625,8 @@ wm_gmii_reset(struct wm_softc *sc) case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: - /* generic reset */ + case WM_T_PCH_CNP: + /* Generic reset */ CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); CSR_WRITE_FLUSH(sc); delay(100); @@ -8838,7 +9645,7 @@ wm_gmii_reset(struct wm_softc *sc) /* get_cfg_done */ wm_get_cfg_done(sc); - /* extra setup */ + /* Extra setup */ switch (sc->sc_type) { case WM_T_82542_2_0: case WM_T_82542_2_1: @@ -8854,6 +9661,8 @@ wm_gmii_reset(struct wm_softc *sc) case WM_T_82571: case WM_T_82572: case WM_T_82573: + case WM_T_82574: + case WM_T_82583: case WM_T_82575: case WM_T_82576: case WM_T_82580: @@ -8862,11 +9671,7 @@ wm_gmii_reset(struct wm_softc *sc) case WM_T_I210: case WM_T_I211: case WM_T_80003: - /* null */ - break; - case WM_T_82574: - case WM_T_82583: - wm_lplu_d0_disable(sc); + /* Null */ break; case WM_T_82541: case WM_T_82547: @@ -8879,34 +9684,8 @@ wm_gmii_reset(struct wm_softc *sc) case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: - /* Allow time for h/w to get to a 
quiescent state afer reset */ - delay(10*1000); - - if (sc->sc_type == WM_T_PCH) - wm_hv_phy_workaround_ich8lan(sc); - - if (sc->sc_type == WM_T_PCH2) - wm_lv_phy_workaround_ich8lan(sc); - - /* Clear the host wakeup bit after lcd reset */ - if (sc->sc_type >= WM_T_PCH) { - reg = wm_gmii_hv_readreg(sc->sc_dev, 2, - BM_PORT_GEN_CFG); - reg &= ~BM_WUC_HOST_WU_BIT; - wm_gmii_hv_writereg(sc->sc_dev, 2, - BM_PORT_GEN_CFG, reg); - } - - /* - * XXX Configure the LCD with th extended configuration region - * in NVM - */ - - /* Disable D0 LPLU. */ - if (sc->sc_type >= WM_T_PCH) /* PCH* */ - wm_lplu_d0_disable_pch(sc); - else - wm_lplu_d0_disable(sc); /* ICH* */ + case WM_T_PCH_CNP: + wm_phy_post_reset(sc); break; default: panic("%s: unknown type\n", __func__); @@ -8926,7 +9705,7 @@ wm_gmii_reset(struct wm_softc *sc) * result might be incorrect. * * In the second call, PHY OUI and model is used to identify PHY type. - * It might not be perfpect because of the lack of compared entry, but it + * It might not be perfect because of the lack of compared entry, but it * would be better than the first call. * * If the detected new result and previous assumption is different, @@ -8943,6 +9722,9 @@ wm_gmii_setup_phytype(struct wm_softc *s mii_readreg_t new_readreg; mii_writereg_t new_writereg; + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + if (mii->mii_readreg == NULL) { /* * This is the first call of this function. For ICH and PCH @@ -8983,7 +9765,7 @@ wm_gmii_setup_phytype(struct wm_softc *s } else { /* It's not the first call. Use PHY OUI and model */ switch (phy_oui) { - case MII_OUI_ATHEROS: /* XXX ??? */ + case MII_OUI_ATTANSIC: /* XXX ??? */ switch (phy_model) { case 0x0004: /* XXX */ new_phytype = WMPHY_82578; @@ -9060,8 +9842,9 @@ wm_gmii_setup_phytype(struct wm_softc *s break; } if (new_phytype == WMPHY_UNKNOWN) - aprint_verbose_dev(dev, "%s: unknown PHY model\n", - __func__); + aprint_verbose_dev(dev, + "%s: unknown PHY model. 
OUI=%06x, model=%04x\n", + __func__, phy_oui, phy_model); if ((sc->sc_phytype != WMPHY_UNKNOWN) && (sc->sc_phytype != new_phytype )) { @@ -9129,7 +9912,7 @@ wm_gmii_setup_phytype(struct wm_softc *s new_readreg = wm_gmii_bm_readreg; new_writereg = wm_gmii_bm_writereg; } - if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) { + if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) { /* All PCH* use _hv_ */ new_readreg = wm_gmii_hv_readreg; new_writereg = wm_gmii_hv_writereg; @@ -9152,11 +9935,18 @@ wm_gmii_setup_phytype(struct wm_softc *s if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg)) aprint_error_dev(dev, "Previously assumed PHY read/write " "function was incorrect.\n"); - + /* Update now */ sc->sc_phytype = new_phytype; mii->mii_readreg = new_readreg; mii->mii_writereg = new_writereg; + if (new_readreg == wm_gmii_hv_readreg) { + sc->phy.readreg_locked = wm_gmii_hv_readreg_locked; + sc->phy.writereg_locked = wm_gmii_hv_writereg_locked; + } else if (new_readreg == wm_gmii_i82544_readreg) { + sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked; + sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked; + } } /* @@ -9243,18 +10033,13 @@ wm_gmii_mediainit(struct wm_softc *sc, p /* Initialize our media structures and probe the GMII. */ mii->mii_ifp = ifp; - /* - * The first call of wm_mii_setup_phytype. The result might be - * incorrect. 
- */ - wm_gmii_setup_phytype(sc, 0, 0); - mii->mii_statchg = wm_gmii_statchg; /* get PHY control from SMBus to PCIe */ if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) - || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) - wm_smbustopci(sc); + || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) + || (sc->sc_type == WM_T_PCH_CNP)) + wm_init_phy_workarounds_pchlan(sc); wm_gmii_reset(sc); @@ -9288,27 +10073,28 @@ wm_gmii_mediainit(struct wm_softc *sc, p CSR_WRITE_FLUSH(sc); delay(300*1000); /* XXX too long */ - /* from 1 to 8 */ + /* From 1 to 8 */ for (i = 1; i < 8; i++) mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, i, MII_OFFSET_ANY, MIIF_DOPAUSE); - /* restore previous sfp cage power state */ + /* Restore previous sfp cage power state */ CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); } } - } else { + } else mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); - } /* * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call * wm_set_mdio_slow_mode_hv() for a workaround and retry. */ - if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) && - (LIST_FIRST(&mii->mii_phys) == NULL)) { + if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) + || (sc->sc_type == WM_T_PCH_SPT) + || (sc->sc_type == WM_T_PCH_CNP)) + && (LIST_FIRST(&mii->mii_phys) == NULL)) { wm_set_mdio_slow_mode_hv(sc); mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); @@ -9341,7 +10127,7 @@ wm_gmii_mediainit(struct wm_softc *sc, p /* * PHY Found! Check PHY type again by the second call of - * wm_mii_setup_phytype. + * wm_gmii_setup_phytype. */ wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model); @@ -9367,6 +10153,9 @@ wm_gmii_mediachange(struct ifnet *ifp) if ((ifp->if_flags & IFF_UP) == 0) return 0; + /* Disable D0 LPLU. 
*/ + wm_lplu_d0_disable(sc); + sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); sc->sc_ctrl |= CTRL_SLU; if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) @@ -9387,12 +10176,16 @@ wm_gmii_mediachange(struct ifnet *ifp) case IFM_1000_T: sc->sc_ctrl |= CTRL_SPEED_1000; break; + case IFM_NONE: + /* There is no specific setting for IFM_NONE */ + break; default: panic("wm_gmii_mediachange: bad media 0x%x", ife->ifm_media); } } CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); + CSR_WRITE_FLUSH(sc); if (sc->sc_type <= WM_T_82543) wm_gmii_reset(sc); @@ -9429,7 +10222,7 @@ wm_i82543_mii_sendbits(struct wm_softc * v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); v |= MDI_DIR | CTRL_SWDPIO(3); - for (i = 1 << (nbits - 1); i != 0; i >>= 1) { + for (i = __BIT(nbits - 1); i != 0; i >>= 1) { if (data & i) v |= MDI_IO; else @@ -9497,9 +10290,9 @@ wm_i82543_mii_recvbits(struct wm_softc * * Read a PHY register on the GMII (i82543 version). */ static int -wm_gmii_i82543_readreg(device_t self, int phy, int reg) +wm_gmii_i82543_readreg(device_t dev, int phy, int reg) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); int rv; wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); @@ -9508,7 +10301,7 @@ wm_gmii_i82543_readreg(device_t self, in rv = wm_i82543_mii_recvbits(sc) & 0xffff; DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n", - device_xname(sc->sc_dev), phy, reg, rv)); + device_xname(dev), phy, reg, rv)); return rv; } @@ -9519,9 +10312,9 @@ wm_gmii_i82543_readreg(device_t self, in * Write a PHY register on the GMII (i82543 version). 
*/ static void -wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val) +wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | @@ -9535,38 +10328,52 @@ wm_gmii_i82543_writereg(device_t self, i * Read a PHY register on the GMII. */ static int -wm_gmii_mdic_readreg(device_t self, int phy, int reg) +wm_gmii_mdic_readreg(device_t dev, int phy, int reg) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); uint32_t mdic = 0; int i, rv; + if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217) + && (reg > MII_ADDRMASK)) { + device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", + __func__, sc->sc_phytype, reg); + reg &= MII_ADDRMASK; + } + CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | MDIC_REGADD(reg)); for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { + delay(50); mdic = CSR_READ(sc, WMREG_MDIC); if (mdic & MDIC_READY) break; - delay(50); } if ((mdic & MDIC_READY) == 0) { log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n", - device_xname(sc->sc_dev), phy, reg); - rv = 0; + device_xname(dev), phy, reg); + return 0; } else if (mdic & MDIC_E) { #if 0 /* This is normal if no PHY is present. */ log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n", - device_xname(sc->sc_dev), phy, reg); + device_xname(dev), phy, reg); #endif - rv = 0; + return 0; } else { rv = MDIC_DATA(mdic); if (rv == 0xffff) rv = 0; } + /* + * Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (sc->sc_type == WM_T_PCH2) + delay(100); + return rv; } @@ -9576,28 +10383,45 @@ wm_gmii_mdic_readreg(device_t self, int * Write a PHY register on the GMII. 
*/ static void -wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val) +wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); uint32_t mdic = 0; int i; + if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217) + && (reg > MII_ADDRMASK)) { + device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", + __func__, sc->sc_phytype, reg); + reg &= MII_ADDRMASK; + } + CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) | MDIC_REGADD(reg) | MDIC_DATA(val)); for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { + delay(50); mdic = CSR_READ(sc, WMREG_MDIC); if (mdic & MDIC_READY) break; - delay(50); } - if ((mdic & MDIC_READY) == 0) + if ((mdic & MDIC_READY) == 0) { log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n", - device_xname(sc->sc_dev), phy, reg); - else if (mdic & MDIC_E) + device_xname(dev), phy, reg); + return; + } else if (mdic & MDIC_E) { log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n", - device_xname(sc->sc_dev), phy, reg); + device_xname(dev), phy, reg); + return; + } + + /* + * Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (sc->sc_type == WM_T_PCH2) + delay(100); } /* @@ -9606,20 +10430,48 @@ wm_gmii_mdic_writereg(device_t self, int * Read a PHY register on the GMII. 
*/ static int -wm_gmii_i82544_readreg(device_t self, int phy, int reg) +wm_gmii_i82544_readreg(device_t dev, int phy, int reg) { - struct wm_softc *sc = device_private(self); - int rv; + struct wm_softc *sc = device_private(dev); + uint16_t val; if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return 0; } - rv = wm_gmii_mdic_readreg(self, phy, reg); + + wm_gmii_i82544_readreg_locked(dev, phy, reg, &val); + sc->phy.release(sc); - return rv; + return val; +} + +static int +wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val) +{ + struct wm_softc *sc = device_private(dev); + + if (reg > BME1000_MAX_MULTI_PAGE_REG) { + switch (sc->sc_phytype) { + case WMPHY_IGP: + case WMPHY_IGP_2: + case WMPHY_IGP_3: + wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, + reg); + break; + default: +#ifdef WM_DEBUG + device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n", + __func__, sc->sc_phytype, reg); +#endif + break; + } + } + + *val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK); + + return 0; } /* @@ -9628,18 +10480,46 @@ wm_gmii_i82544_readreg(device_t self, in * Write a PHY register on the GMII. 
*/ static void -wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val) +wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); + return; } - wm_gmii_mdic_writereg(self, phy, reg, val); + + wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val); sc->phy.release(sc); } +static int +wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val) +{ + struct wm_softc *sc = device_private(dev); + + if (reg > BME1000_MAX_MULTI_PAGE_REG) { + switch (sc->sc_phytype) { + case WMPHY_IGP: + case WMPHY_IGP_2: + case WMPHY_IGP_3: + wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, + reg); + break; + default: +#ifdef WM_DEBUG + device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x", + __func__, sc->sc_phytype, reg); +#endif + break; + } + } + + wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); + + return 0; +} + /* * wm_gmii_i80003_readreg: [mii interface function] * @@ -9648,33 +10528,49 @@ wm_gmii_i82544_writereg(device_t self, i * ressource ... 
*/ static int -wm_gmii_i80003_readreg(device_t self, int phy, int reg) +wm_gmii_i80003_readreg(device_t dev, int phy, int reg) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); + int page_select, temp; int rv; - if (phy != 1) /* only one PHY on kumeran bus */ + if (phy != 1) /* Only one PHY on kumeran bus */ return 0; if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return 0; } - if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) { - wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT, - reg >> GG82563_PAGE_SHIFT); - } else { - wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, - reg >> GG82563_PAGE_SHIFT); - } - /* Wait more 200us for a bug of the ready bit in the MDIC register */ - delay(200); - rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK); - delay(200); - sc->phy.release(sc); + if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) + page_select = GG82563_PHY_PAGE_SELECT; + else { + /* + * Use Alternative Page Select register to access registers + * 30 and 31. + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + temp = (uint16_t)reg >> GG82563_PAGE_SHIFT; + wm_gmii_mdic_writereg(dev, phy, page_select, temp); + if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) { + /* + * Wait more 200us for a bug of the ready bit in the MDIC + * register. + */ + delay(200); + if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) { + device_printf(dev, "%s failed\n", __func__); + rv = 0; /* XXX */ + goto out; + } + rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK); + delay(200); + } else + rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK); +out: + sc->phy.release(sc); return rv; } @@ -9686,31 +10582,46 @@ wm_gmii_i80003_readreg(device_t self, in * ressource ... 
*/ static void -wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val) +wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); + int page_select, temp; - if (phy != 1) /* only one PHY on kumeran bus */ + if (phy != 1) /* Only one PHY on kumeran bus */ return; if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return; } - if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) { - wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT, - reg >> GG82563_PAGE_SHIFT); - } else { - wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, - reg >> GG82563_PAGE_SHIFT); - } - /* Wait more 200us for a bug of the ready bit in the MDIC register */ - delay(200); - wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val); - delay(200); + if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) + page_select = GG82563_PHY_PAGE_SELECT; + else { + /* + * Use Alternative Page Select register to access registers + * 30 and 31. + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + temp = (uint16_t)reg >> GG82563_PAGE_SHIFT; + wm_gmii_mdic_writereg(dev, phy, page_select, temp); + if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) { + /* + * Wait more 200us for a bug of the ready bit in the MDIC + * register. + */ + delay(200); + if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) { + device_printf(dev, "%s failed\n", __func__); + goto out; + } + wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); + delay(200); + } else + wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); +out: sc->phy.release(sc); } @@ -9722,16 +10633,15 @@ wm_gmii_i80003_writereg(device_t self, i * ressource ... 
*/ static int -wm_gmii_bm_readreg(device_t self, int phy, int reg) +wm_gmii_bm_readreg(device_t dev, int phy, int reg) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); uint16_t page = reg >> BME1000_PAGE_SHIFT; uint16_t val; int rv; if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return 0; } @@ -9740,7 +10650,7 @@ wm_gmii_bm_readreg(device_t self, int ph || (reg == 31)) ? 1 : phy; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { - wm_access_phy_wakeup_reg_bm(self, reg, &val, 1); + wm_access_phy_wakeup_reg_bm(dev, reg, &val, true, false); rv = val; goto release; } @@ -9748,14 +10658,14 @@ wm_gmii_bm_readreg(device_t self, int ph if (reg > BME1000_MAX_MULTI_PAGE_REG) { if ((phy == 1) && (sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583)) - wm_gmii_mdic_writereg(self, phy, + wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); else - wm_gmii_mdic_writereg(self, phy, + wm_gmii_mdic_writereg(dev, phy, BME1000_PHY_PAGE_SELECT, page); } - rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK); + rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK); release: sc->phy.release(sc); @@ -9770,14 +10680,13 @@ release: * ressource ... 
*/ static void -wm_gmii_bm_writereg(device_t self, int phy, int reg, int val) +wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); uint16_t page = reg >> BME1000_PAGE_SHIFT; if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return; } @@ -9789,82 +10698,180 @@ wm_gmii_bm_writereg(device_t self, int p uint16_t tmp; tmp = val; - wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0); + wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, false, false); goto release; } if (reg > BME1000_MAX_MULTI_PAGE_REG) { if ((phy == 1) && (sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583)) - wm_gmii_mdic_writereg(self, phy, + wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); else - wm_gmii_mdic_writereg(self, phy, + wm_gmii_mdic_writereg(dev, phy, BME1000_PHY_PAGE_SELECT, page); } - wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val); + wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); release: sc->phy.release(sc); } -static void -wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd) +/* + * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers + * @dev: pointer to the HW structure + * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG + * + * Assumes semaphore already acquired and phy_reg points to a valid memory + * address to store contents of the BM_WUC_ENABLE_REG register. 
+ */ +static int +wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp) { - struct wm_softc *sc = device_private(self); - uint16_t regnum = BM_PHY_REG_NUM(offset); - uint16_t wuce, reg; + uint16_t temp; - DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n", - device_xname(sc->sc_dev), __func__)); - /* XXX Gig must be disabled for MDIO accesses to page 800 */ - if (sc->sc_type == WM_T_PCH) { - /* XXX e1000 driver do nothing... why? */ - } + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(dev), __func__)); - /* - * 1) Enable PHY wakeup register first. - * See e1000_enable_phy_wakeup_reg_access_bm(). - */ + if (!phy_regp) + return -1; + + /* All page select, port ctrl and wakeup registers use phy address 1 */ - /* Set page 769 */ - wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, - BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); + /* Select Port Control Registers page */ + wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, + BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT); /* Read WUCE and save it */ - wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG); + *phy_regp = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG); - reg = wuce | BM_WUC_ENABLE_BIT; - reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); - wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg); - - /* Select page 800 */ - wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, - BM_WUC_PAGE << BME1000_PAGE_SHIFT); + /* Enable both PHY wakeup mode and Wakeup register page writes. + * Prevent a power state change by disabling ME and Host PHY wakeup. + */ + temp = *phy_regp; + temp |= BM_WUC_ENABLE_BIT; + temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); - /* - * 2) Access PHY wakeup register. - * See e1000_access_phy_wakeup_reg_bm. 
+ wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp); + + /* Select Host Wakeup Registers page - caller now able to write + * registers on the Wakeup registers page */ + wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, + BM_WUC_PAGE << IGP3_PAGE_SHIFT); - /* Write page 800 */ - wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum); + return 0; +} - if (rd) - *val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE); - else - wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val); +/* + * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs + * @dev: pointer to the HW structure + * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG + * + * Restore BM_WUC_ENABLE_REG to its original value. + * + * Assumes semaphore already acquired and *phy_reg is the contents of the + * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by + * caller. + */ +static int +wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp) +{ + + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(dev), __func__)); + + if (!phy_regp) + return -1; + + /* Select Port Control Registers page */ + wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, + BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT); + + /* Restore 769.17 to its original value */ + wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp); + + return 0; +} + +/* + * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register + * @sc: pointer to the HW structure + * @offset: register offset to be read or written + * @val: pointer to the data to read or write + * @rd: determines if operation is read or write + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Read the PHY register at offset and store the retrieved information in + * data, or write data to PHY register at offset. Note the procedure to + * access the PHY wakeup registers is different than reading the other PHY + * registers. 
It works as such: + * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769.17.2 to its original value + * + * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and + * step 5 is done by wm_disable_phy_wakeup_reg_access_bm(). + * + * Assumes semaphore is already acquired. When page_set==TRUE, assumes + * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack + * is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()). + */ +static int +wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd, + bool page_set) +{ + struct wm_softc *sc = device_private(dev); + uint16_t regnum = BM_PHY_REG_NUM(offset); + uint16_t page = BM_PHY_REG_PAGE(offset); + uint16_t wuce; + int rv = 0; + + DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n", + device_xname(dev), __func__)); + /* XXX Gig must be disabled for MDIO accesses to page 800 */ + if ((sc->sc_type == WM_T_PCH) + && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) { + device_printf(dev, + "Attempting to access page %d while gig enabled.\n", page); + } + + if (!page_set) { + /* Enable access to PHY wakeup registers */ + rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce); + if (rv != 0) { + device_printf(dev, + "%s: Could not enable PHY wakeup reg access\n", + __func__); + return rv; + } + } + DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n", + device_xname(sc->sc_dev), __func__, page, regnum)); /* - * 3) Disable PHY wakeup register. - * See e1000_disable_phy_wakeup_reg_access_bm(). + * 2) Access PHY wakeup register. + * See wm_access_phy_wakeup_reg_bm. 
*/ - /* Set page 769 */ - wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, - BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); - wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce); + /* Write the Wakeup register page offset value using opcode 0x11 */ + wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum); + + if (rd) { + /* Read the Wakeup register page value using opcode 0x12 */ + *val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE); + } else { + /* Write the Wakeup register page value using opcode 0x12 */ + wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val); + } + + if (!page_set) + rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce); + + return rv; } /* @@ -9875,56 +10882,58 @@ wm_access_phy_wakeup_reg_bm(device_t sel * ressource ... */ static int -wm_gmii_hv_readreg(device_t self, int phy, int reg) +wm_gmii_hv_readreg(device_t dev, int phy, int reg) { - struct wm_softc *sc = device_private(self); - int rv; + struct wm_softc *sc = device_private(dev); + uint16_t val; DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n", - device_xname(sc->sc_dev), __func__)); + device_xname(dev), __func__)); if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return 0; } - rv = wm_gmii_hv_readreg_locked(self, phy, reg); + wm_gmii_hv_readreg_locked(dev, phy, reg, &val); sc->phy.release(sc); - return rv; + return val; } static int -wm_gmii_hv_readreg_locked(device_t self, int phy, int reg) +wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val) { uint16_t page = BM_PHY_REG_PAGE(reg); uint16_t regnum = BM_PHY_REG_NUM(reg); - uint16_t val; - int rv; phy = (page >= HV_INTC_FC_PAGE_START) ? 
1 : phy; /* Page 800 works differently than the rest so it has its own func */ - if (page == BM_WUC_PAGE) { - wm_access_phy_wakeup_reg_bm(self, reg, &val, 1); - return val; - } + if (page == BM_WUC_PAGE) + return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false); /* * Lower than page 768 works differently than the rest so it has its * own func */ if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { - printf("gmii_hv_readreg!!!\n"); + device_printf(dev, "gmii_hv_readreg!!!\n"); return 0; } + /* + * XXX I21[789] documents say that the SMBus Address register is at + * PHY address 01, Page 0 (not 768), Register 26. + */ + if (page == HV_INTC_FC_PAGE_START) + page = 0; + if (regnum > BME1000_MAX_MULTI_PAGE_REG) { - wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, + wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); } - rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK); - return rv; + *val = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK); + return 0; } /* @@ -9935,29 +10944,29 @@ wm_gmii_hv_readreg_locked(device_t self, * ressource ... 
*/ static void -wm_gmii_hv_writereg(device_t self, int phy, int reg, int val) +wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n", - device_xname(sc->sc_dev), __func__)); + device_xname(dev), __func__)); if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return; } - wm_gmii_hv_writereg_locked(self, phy, reg, val); + wm_gmii_hv_writereg_locked(dev, phy, reg, val); sc->phy.release(sc); } -static void -wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val) +static int +wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); uint16_t page = BM_PHY_REG_PAGE(reg); uint16_t regnum = BM_PHY_REG_NUM(reg); + int rv; phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy; @@ -9966,8 +10975,8 @@ wm_gmii_hv_writereg_locked(device_t self uint16_t tmp; tmp = val; - wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0); - return; + rv = wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, false, false); + return rv; } /* @@ -9975,13 +10984,20 @@ wm_gmii_hv_writereg_locked(device_t self * own func */ if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { - printf("gmii_hv_writereg!!!\n"); - return; + device_printf(dev, "gmii_hv_writereg!!!\n"); + return -1; } { /* - * XXX Workaround MDIO accesses being disabled after entering + * XXX I21[789] documents say that the SMBus Address register + * is at PHY address 01, Page 0 (not 768), Register 26. 
+ */ + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + /* + * XXX Workaround MDIO accesses being disabled after entering * IEEE Power Down (whenever bit 11 of the PHY control * register is set) */ @@ -9992,17 +11008,19 @@ wm_gmii_hv_writereg_locked(device_t self if ((child != NULL) && (child->mii_mpd_rev >= 1) && (phy == 2) && ((regnum & MII_ADDRMASK) == 0) && ((val & (1 << 11)) != 0)) { - printf("XXX need workaround\n"); + device_printf(dev, "XXX need workaround\n"); } } if (regnum > BME1000_MAX_MULTI_PAGE_REG) { - wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT, + wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); } } - wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val); + wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val); + + return 0; } /* @@ -10013,18 +11031,24 @@ wm_gmii_hv_writereg_locked(device_t self * ressource ... */ static int -wm_gmii_82580_readreg(device_t self, int phy, int reg) +wm_gmii_82580_readreg(device_t dev, int phy, int reg) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); int rv; if (sc->phy.acquire(sc) != 0) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return 0; } - rv = wm_gmii_mdic_readreg(self, phy, reg); +#ifdef DIAGNOSTIC + if (reg > MII_ADDRMASK) { + device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", + __func__, sc->sc_phytype, reg); + reg &= MII_ADDRMASK; + } +#endif + rv = wm_gmii_mdic_readreg(dev, phy, reg); sc->phy.release(sc); return rv; @@ -10038,17 +11062,23 @@ wm_gmii_82580_readreg(device_t self, int * ressource ... 
*/ static void -wm_gmii_82580_writereg(device_t self, int phy, int reg, int val) +wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); if (sc->phy.acquire(sc) != 0) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return; } - wm_gmii_mdic_writereg(self, phy, reg, val); +#ifdef DIAGNOSTIC + if (reg > MII_ADDRMASK) { + device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", + __func__, sc->sc_phytype, reg); + reg &= MII_ADDRMASK; + } +#endif + wm_gmii_mdic_writereg(dev, phy, reg, val); sc->phy.release(sc); } @@ -10061,26 +11091,25 @@ wm_gmii_82580_writereg(device_t self, in * ressource ... */ static int -wm_gmii_gs40g_readreg(device_t self, int phy, int reg) +wm_gmii_gs40g_readreg(device_t dev, int phy, int reg) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); int page, offset; int rv; /* Acquire semaphore */ if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return 0; } /* Page select */ page = reg >> GS40G_PAGE_SHIFT; - wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page); + wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page); /* Read reg */ offset = reg & GS40G_OFFSET_MASK; - rv = wm_gmii_mdic_readreg(self, phy, offset); + rv = wm_gmii_mdic_readreg(dev, phy, offset); sc->phy.release(sc); return rv; @@ -10094,25 +11123,24 @@ wm_gmii_gs40g_readreg(device_t self, int * ressource ... 
*/ static void -wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val) +wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); int page, offset; /* Acquire semaphore */ if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return; } /* Page select */ page = reg >> GS40G_PAGE_SHIFT; - wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page); + wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page); /* Write reg */ offset = reg & GS40G_OFFSET_MASK; - wm_gmii_mdic_writereg(self, phy, offset, val); + wm_gmii_mdic_writereg(dev, phy, offset, val); /* Release semaphore */ sc->phy.release(sc); @@ -10133,9 +11161,7 @@ wm_gmii_statchg(struct ifnet *ifp) sc->sc_tctl &= ~TCTL_COLD(0x3ff); sc->sc_fcrtl &= ~FCRTL_XONE; - /* - * Get flow control negotiation result. - */ + /* Get flow control negotiation result. 
*/ if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; @@ -10190,7 +11216,7 @@ wm_gmii_statchg(struct ifnet *ifp) * Read a kumeran register */ static int -wm_kmrn_readreg(struct wm_softc *sc, int reg) +wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val) { int rv; @@ -10199,12 +11225,12 @@ wm_kmrn_readreg(struct wm_softc *sc, int else rv = sc->phy.acquire(sc); if (rv != 0) { - aprint_error_dev(sc->sc_dev, - "%s: failed to get semaphore\n", __func__); - return 0; + device_printf(sc->sc_dev, "%s: failed to get semaphore\n", + __func__); + return rv; } - rv = wm_kmrn_readreg_locked(sc, reg); + rv = wm_kmrn_readreg_locked(sc, reg, val); if (sc->sc_type == WM_T_80003) wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); @@ -10215,9 +11241,8 @@ wm_kmrn_readreg(struct wm_softc *sc, int } static int -wm_kmrn_readreg_locked(struct wm_softc *sc, int reg) +wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val) { - int rv; CSR_WRITE(sc, WMREG_KUMCTRLSTA, ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | @@ -10225,9 +11250,9 @@ wm_kmrn_readreg_locked(struct wm_softc * CSR_WRITE_FLUSH(sc); delay(2); - rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK; + *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK; - return rv; + return 0; } /* @@ -10235,8 +11260,8 @@ wm_kmrn_readreg_locked(struct wm_softc * * * Write a kumeran register */ -static void -wm_kmrn_writereg(struct wm_softc *sc, int reg, int val) +static int +wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val) { int rv; @@ -10245,26 +11270,29 @@ wm_kmrn_writereg(struct wm_softc *sc, in else rv = sc->phy.acquire(sc); if (rv != 0) { - aprint_error_dev(sc->sc_dev, - "%s: failed to get semaphore\n", __func__); - return; + device_printf(sc->sc_dev, "%s: failed to get semaphore\n", + __func__); + return rv; } - wm_kmrn_writereg_locked(sc, reg, val); + rv = 
wm_kmrn_writereg_locked(sc, reg, val); if (sc->sc_type == WM_T_80003) wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); else sc->phy.release(sc); + + return rv; } -static void -wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val) +static int +wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val) { CSR_WRITE(sc, WMREG_KUMCTRLSTA, - ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | - (val & KUMCTRLSTA_MASK)); + ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val); + + return 0; } /* SGMII related */ @@ -10310,21 +11338,19 @@ wm_sgmii_uses_mdio(struct wm_softc *sc) * ressource ... */ static int -wm_sgmii_readreg(device_t self, int phy, int reg) +wm_sgmii_readreg(device_t dev, int phy, int reg) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); uint32_t i2ccmd; int i, rv; if (sc->phy.acquire(sc)) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return 0; } i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) - | (phy << I2CCMD_PHY_ADDR_SHIFT) - | I2CCMD_OPCODE_READ; + | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ; CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); /* Poll the ready bit */ @@ -10335,9 +11361,9 @@ wm_sgmii_readreg(device_t self, int phy, break; } if ((i2ccmd & I2CCMD_READY) == 0) - aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n"); + device_printf(dev, "I2CCMD Read did not complete\n"); if ((i2ccmd & I2CCMD_ERROR) != 0) - aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n"); + device_printf(dev, "I2CCMD Error bit set\n"); rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00); @@ -10353,23 +11379,21 @@ wm_sgmii_readreg(device_t self, int phy, * ressource ... 
*/ static void -wm_sgmii_writereg(device_t self, int phy, int reg, int val) +wm_sgmii_writereg(device_t dev, int phy, int reg, int val) { - struct wm_softc *sc = device_private(self); + struct wm_softc *sc = device_private(dev); uint32_t i2ccmd; int i; - int val_swapped; + int swapdata; if (sc->phy.acquire(sc) != 0) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); + device_printf(dev, "%s: failed to get semaphore\n", __func__); return; } /* Swap the data bytes for the I2C interface */ - val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00); + swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00); i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) - | (phy << I2CCMD_PHY_ADDR_SHIFT) - | I2CCMD_OPCODE_WRITE | val_swapped; + | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata; CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); /* Poll the ready bit */ @@ -10380,15 +11404,32 @@ wm_sgmii_writereg(device_t self, int phy break; } if ((i2ccmd & I2CCMD_READY) == 0) - aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n"); + device_printf(dev, "I2CCMD Write did not complete\n"); if ((i2ccmd & I2CCMD_ERROR) != 0) - aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n"); + device_printf(dev, "I2CCMD Error bit set\n"); sc->phy.release(sc); } /* TBI related */ +static bool +wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl) +{ + bool sig; + + sig = ctrl & CTRL_SWDPIN(1); + + /* + * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics + * detect a signal, 1 if they don't. 
+ */ + if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544)) + sig = !sig; + + return sig; +} + /* * wm_tbi_mediainit: * @@ -10411,7 +11452,7 @@ wm_tbi_mediainit(struct wm_softc *sc) sc->sc_mii.mii_ifp = ifp; sc->sc_ethercom.ec_mii = &sc->sc_mii; - if ((sc->sc_type >= WM_T_82575) + if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_serdes_mediachange, wm_serdes_mediastatus); @@ -10451,9 +11492,9 @@ do { \ status = CSR_READ(sc, WMREG_STATUS); if (((status & STATUS_2P5_SKU) != 0) && ((status & STATUS_2P5_SKU_OVER) == 0)) { - ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD); + ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD); } else - ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD); + ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD); } else if (sc->sc_type == WM_T_82545) { /* Only 82545 is LX (XXX except SFP) */ ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD); @@ -10480,9 +11521,11 @@ wm_tbi_mediachange(struct ifnet *ifp) { struct wm_softc *sc = ifp->if_softc; struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; - uint32_t status; + uint32_t status, ctrl; + bool signal; int i; + KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER); if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) { /* XXX need some work for >= 82571 and < 82575 */ if (sc->sc_type < WM_T_82575) @@ -10506,20 +11549,19 @@ wm_tbi_mediachange(struct ifnet *ifp) sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE; DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n", - device_xname(sc->sc_dev), sc->sc_txcw)); + device_xname(sc->sc_dev), sc->sc_txcw)); CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); CSR_WRITE_FLUSH(sc); delay(1000); - i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1); - DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i)); + ctrl = CSR_READ(sc, WMREG_CTRL); + signal = wm_tbi_havesignal(sc, ctrl); - /* - * 
On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the - * optics detect a signal, 0 if they don't. - */ - if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) { + DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev), + signal)); + + if (signal) { /* Have signal; wait for the link to come up. */ for (i = 0; i < WM_LINKUP_TIMEOUT; i++) { delay(10000); @@ -10528,18 +11570,18 @@ wm_tbi_mediachange(struct ifnet *ifp) } DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n", - device_xname(sc->sc_dev),i)); + device_xname(sc->sc_dev), i)); status = CSR_READ(sc, WMREG_STATUS); DPRINTF(WM_DEBUG_LINK, ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n", - device_xname(sc->sc_dev),status, STATUS_LU)); + device_xname(sc->sc_dev), status, STATUS_LU)); if (status & STATUS_LU) { /* Link is up. */ DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> link up %s\n", - device_xname(sc->sc_dev), - (status & STATUS_FD) ? "FDX" : "HDX")); + device_xname(sc->sc_dev), + (status & STATUS_FD) ? "FDX" : "HDX")); /* * NOTE: CTRL will update TFCE and RFCE automatically, @@ -10558,8 +11600,7 @@ wm_tbi_mediachange(struct ifnet *ifp) sc->sc_fcrtl |= FCRTL_XONE; CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? - WMREG_OLD_FCRTL : WMREG_FCRTL, - sc->sc_fcrtl); + WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl); sc->sc_tbi_linkup = 1; } else { if (i == WM_LINKUP_TIMEOUT) @@ -10567,12 +11608,12 @@ wm_tbi_mediachange(struct ifnet *ifp) /* Link is down. 
*/ DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> link down\n", - device_xname(sc->sc_dev))); + device_xname(sc->sc_dev))); sc->sc_tbi_linkup = 0; } } else { DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", - device_xname(sc->sc_dev))); + device_xname(sc->sc_dev))); sc->sc_tbi_linkup = 0; } @@ -10626,7 +11667,10 @@ wm_check_for_link(struct wm_softc *sc) uint32_t rxcw; uint32_t ctrl; uint32_t status; - uint32_t sig; + bool signal; + + DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) { /* XXX need some work for >= 82571 */ @@ -10639,32 +11683,29 @@ wm_check_for_link(struct wm_softc *sc) rxcw = CSR_READ(sc, WMREG_RXCW); ctrl = CSR_READ(sc, WMREG_CTRL); status = CSR_READ(sc, WMREG_STATUS); - - sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0; + signal = wm_tbi_havesignal(sc, ctrl); DPRINTF(WM_DEBUG_LINK, - ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n", - device_xname(sc->sc_dev), __func__, - ((ctrl & CTRL_SWDPIN(1)) == sig), + ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n", + device_xname(sc->sc_dev), __func__, signal, ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0))); /* * SWDPIN LU RXCW - * 0 0 0 - * 0 0 1 (should not happen) - * 0 1 0 (should not happen) - * 0 1 1 (should not happen) - * 1 0 0 Disable autonego and force linkup - * 1 0 1 got /C/ but not linkup yet - * 1 1 0 (linkup) - * 1 1 1 If IFM_AUTO, back to autonego + * 0 0 0 + * 0 0 1 (should not happen) + * 0 1 0 (should not happen) + * 0 1 1 (should not happen) + * 1 0 0 Disable autonego and force linkup + * 1 0 1 got /C/ but not linkup yet + * 1 1 0 (linkup) + * 1 1 1 If IFM_AUTO, back to autonego * */ - if (((ctrl & CTRL_SWDPIN(1)) == sig) - && ((status & STATUS_LU) == 0) - && ((rxcw & RXCW_C) == 0)) { - DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n", - __func__)); + if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) { + DPRINTF(WM_DEBUG_LINK, + ("%s: %s: force 
linkup and fullduplex\n", + device_xname(sc->sc_dev), __func__)); sc->sc_tbi_linkup = 0; /* Disable auto-negotiation in the TXCW register */ CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE)); @@ -10681,17 +11722,18 @@ wm_check_for_link(struct wm_softc *sc) && ((rxcw & RXCW_C) != 0) && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) { sc->sc_tbi_linkup = 1; - DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n", + DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n", + device_xname(sc->sc_dev), __func__)); CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU)); - } else if (((ctrl & CTRL_SWDPIN(1)) == sig) - && ((rxcw & RXCW_C) != 0)) { - DPRINTF(WM_DEBUG_LINK, ("/C/")); - } else { - DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl, + } else if (signal && ((rxcw & RXCW_C) != 0)) + DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/", + device_xname(sc->sc_dev), __func__)); + else + DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n", + device_xname(sc->sc_dev), __func__, rxcw, ctrl, status)); - } return 0; } @@ -10719,13 +11761,11 @@ wm_tbi_tick(struct wm_softc *sc) /* set link status */ if ((status & STATUS_LU) == 0) { - DPRINTF(WM_DEBUG_LINK, - ("%s: LINK: checklink -> down\n", + DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev))); sc->sc_tbi_linkup = 0; } else if (sc->sc_tbi_linkup == 0) { - DPRINTF(WM_DEBUG_LINK, - ("%s: LINK: checklink -> up %s\n", + DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev), (status & STATUS_FD) ? 
"FDX" : "HDX")); sc->sc_tbi_linkup = 1; @@ -10870,9 +11910,9 @@ wm_serdes_mediastatus(struct ifnet *ifp, status = CSR_READ(sc, WMREG_STATUS); if (((status & STATUS_2P5_SKU) != 0) && ((status & STATUS_2P5_SKU_OVER) == 0)) { - ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */ + ifmr->ifm_active |= IFM_2500_KX; } else - ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */ + ifmr->ifm_active |= IFM_1000_KX; } else { switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) { case PCS_LSTS_SPEED_10: @@ -11049,10 +12089,10 @@ wm_sfp_get_media_type(struct wm_softc *s if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0) mediatype = WM_MEDIATYPE_SERDES; - else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){ + else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) { sc->sc_flags |= WM_F_SGMII; mediatype = WM_MEDIATYPE_COPPER; - } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){ + } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) { sc->sc_flags |= WM_F_SGMII; mediatype = WM_MEDIATYPE_SERDES; } @@ -11144,6 +12184,9 @@ wm_nvm_read_uwire(struct wm_softc *sc, i DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); + if (sc->nvm.acquire(sc) != 0) + return -1; + for (i = 0; i < wordcnt; i++) { /* Clear SK and DI. 
*/ reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI); @@ -11189,6 +12232,7 @@ wm_nvm_read_uwire(struct wm_softc *sc, i delay(2); } + sc->nvm.release(sc); return 0; } @@ -11217,7 +12261,10 @@ wm_nvm_set_addrbits_size_eecd(struct wm_ case WM_T_82547_2: /* Set dummy value to access EEPROM */ sc->sc_nvm_wordsize = 64; - wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data); + if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) { + aprint_error_dev(sc->sc_dev, + "%s: failed to read EEPROM size\n", __func__); + } reg = data; size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK); if (size == 0) @@ -11280,7 +12327,7 @@ wm_nvm_ready_spi(struct wm_softc *sc) } if (usec >= SPI_MAX_RETRIES) { aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n"); - return 1; + return -1; } return 0; } @@ -11296,18 +12343,22 @@ wm_nvm_read_spi(struct wm_softc *sc, int uint32_t reg, val; int i; uint8_t opc; + int rv = 0; DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); + if (sc->nvm.acquire(sc) != 0) + return -1; + /* Clear SK and CS. */ reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS); CSR_WRITE(sc, WMREG_EECD, reg); CSR_WRITE_FLUSH(sc); delay(2); - if (wm_nvm_ready_spi(sc)) - return 1; + if ((rv = wm_nvm_ready_spi(sc)) != 0) + goto out; /* Toggle CS to flush commands. 
*/ CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS); @@ -11335,7 +12386,9 @@ wm_nvm_read_spi(struct wm_softc *sc, int CSR_WRITE_FLUSH(sc); delay(2); - return 0; +out: + sc->nvm.release(sc); + return rv; } /* Using with EERD */ @@ -11361,27 +12414,31 @@ wm_poll_eerd_eewr_done(struct wm_softc * } static int -wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, - uint16_t *data) +wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data) { int i, eerd = 0; - int error = 0; + int rv = 0; DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); + if (sc->nvm.acquire(sc) != 0) + return -1; + for (i = 0; i < wordcnt; i++) { eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START; - CSR_WRITE(sc, WMREG_EERD, eerd); - error = wm_poll_eerd_eewr_done(sc, WMREG_EERD); - if (error != 0) + rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD); + if (rv != 0) { + aprint_error_dev(sc->sc_dev, "EERD polling failed: " + "offset=%d. wordcnt=%d\n", offset, wordcnt); break; - + } data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT); } - return error; + sc->nvm.release(sc); + return rv; } /* Flash */ @@ -11392,24 +12449,40 @@ wm_nvm_valid_bank_detect_ich8lan(struct uint32_t eecd; uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1; uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t); + uint32_t nvm_dword = 0; uint8_t sig_byte = 0; + int rv; switch (sc->sc_type) { case WM_T_PCH_SPT: - /* - * In SPT, read from the CTRL_EXT reg instead of accessing the - * sector valid bits from the NVM. - */ - *bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS; - if ((*bank == 0) || (*bank == 1)) { - aprint_error_dev(sc->sc_dev, - "%s: no valid NVM bank present (%u)\n", __func__, - *bank); - return -1; - } else { - *bank = *bank - 2; + case WM_T_PCH_CNP: + bank1_offset = sc->sc_ich8_flash_bank_size * 2; + act_offset = ICH_NVM_SIG_WORD * 2; + + /* Set bank to 0 in case flash read fails. 
*/ + *bank = 0; + + /* Check bank 0 */ + rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword); + if (rv != 0) + return rv; + sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { + *bank = 0; + return 0; + } + + /* Check bank 1 */ + rv = wm_read_ich8_dword(sc, act_offset + bank1_offset, + &nvm_dword); + sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { + *bank = 1; return 0; } + aprint_error_dev(sc->sc_dev, + "%s: no valid NVM bank present (%u)\n", __func__, *bank); + return -1; case WM_T_ICH8: case WM_T_ICH9: eecd = CSR_READ(sc, WMREG_EECD); @@ -11456,28 +12529,33 @@ wm_ich8_cycle_init(struct wm_softc *sc) int32_t error = 1; int32_t i = 0; - hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); + if (sc->sc_type >= WM_T_PCH_SPT) + hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL; + else + hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); /* May be check the Flash Des Valid bit in Hw status */ - if ((hsfsts & HSFSTS_FLDVAL) == 0) { + if ((hsfsts & HSFSTS_FLDVAL) == 0) return error; - } /* Clear FCERR in Hw status by writing 1 */ /* Clear DAEL in Hw status by writing a 1 */ hsfsts |= HSFSTS_ERR | HSFSTS_DAEL; - ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); + if (sc->sc_type >= WM_T_PCH_SPT) + ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL); + else + ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); /* * Either we should have a hardware SPI cycle in progress bit to check * against, in order to start a new cycle or FDONE bit should be - * changed in the hardware so that it is 1 after harware reset, which + * changed in the hardware so that it is 1 after hardware reset, which * can then be used as an indication whether a cycle is in progress or * has been completed .. 
we should also have some software semaphore * mechanism to guard FDONE or the cycle in progress bit so that two * threads access to those bits can be sequentiallized or a way so that - * 2 threads dont start the cycle at the same time + * 2 threads don't start the cycle at the same time */ if ((hsfsts & HSFSTS_FLINPRO) == 0) { @@ -11488,15 +12566,24 @@ wm_ich8_cycle_init(struct wm_softc *sc) /* Begin by setting Flash Cycle Done. */ hsfsts |= HSFSTS_DONE; - ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); + if (sc->sc_type >= WM_T_PCH_SPT) + ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, + hsfsts & 0xffffUL); + else + ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); error = 0; } else { /* - * otherwise poll for sometime so the current cycle has a + * Otherwise poll for sometime so the current cycle has a * chance to end before giving up. */ for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) { - hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); + if (sc->sc_type >= WM_T_PCH_SPT) + hsfsts = ICH8_FLASH_READ32(sc, + ICH_FLASH_HSFSTS) & 0xffffUL; + else + hsfsts = ICH8_FLASH_READ16(sc, + ICH_FLASH_HSFSTS); if ((hsfsts & HSFSTS_FLINPRO) == 0) { error = 0; break; @@ -11509,7 +12596,12 @@ wm_ich8_cycle_init(struct wm_softc *sc) * now set the Flash Cycle Done. 
*/ hsfsts |= HSFSTS_DONE; - ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); + if (sc->sc_type >= WM_T_PCH_SPT) + ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, + hsfsts & 0xffffUL); + else + ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, + hsfsts); } } return error; @@ -11529,13 +12621,24 @@ wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t i = 0; /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ - hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); + if (sc->sc_type >= WM_T_PCH_SPT) + hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16; + else + hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); hsflctl |= HSFCTL_GO; - ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); + if (sc->sc_type >= WM_T_PCH_SPT) + ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, + (uint32_t)hsflctl << 16); + else + ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); /* Wait till FDONE bit is set to 1 */ do { - hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); + if (sc->sc_type >= WM_T_PCH_SPT) + hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) + & 0xffffUL; + else + hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); if (hsfsts & HSFSTS_DONE) break; delay(1); @@ -11580,18 +12683,22 @@ wm_read_ich8_data(struct wm_softc *sc, u if (error) break; - hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); + if (sc->sc_type >= WM_T_PCH_SPT) + hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) + >> 16; + else + hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK; hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT; - if (sc->sc_type == WM_T_PCH_SPT) { + if (sc->sc_type >= WM_T_PCH_SPT) { /* * In SPT, This register is in Lan memory space, not * flash. Therefore, only 32 bit access is supported. 
*/ - ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL, - (uint32_t)hsflctl); + ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, + (uint32_t)hsflctl << 16); } else ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); @@ -11627,7 +12734,13 @@ wm_read_ich8_data(struct wm_softc *sc, u * detected, it won't hurt to give it another try... * ICH_FLASH_CYCLE_REPEAT_COUNT times. */ - hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); + if (sc->sc_type >= WM_T_PCH_SPT) + hsfsts = ICH8_FLASH_READ32(sc, + ICH_FLASH_HSFSTS) & 0xffffUL; + else + hsfsts = ICH8_FLASH_READ16(sc, + ICH_FLASH_HSFSTS); + if (hsfsts & HSFSTS_ERR) { /* Repeat for some time before giving up. */ continue; @@ -11711,7 +12824,7 @@ wm_read_ich8_dword(struct wm_softc *sc, static int wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data) { - int32_t error = 0; + int32_t rv = 0; uint32_t flash_bank = 0; uint32_t act_offset = 0; uint32_t bank_offset = 0; @@ -11721,14 +12834,17 @@ wm_nvm_read_ich8(struct wm_softc *sc, in DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); + if (sc->nvm.acquire(sc) != 0) + return -1; + /* * We need to know which is the valid flash bank. In the event * that we didn't allocate eeprom_shadow_ram, we may not be - * managing flash_bank. So it cannot be trusted and needs + * managing flash_bank. So it cannot be trusted and needs * to be updated with each read. 
*/ - error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); - if (error) { + rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); + if (rv) { DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n", device_xname(sc->sc_dev))); flash_bank = 0; @@ -11740,18 +12856,11 @@ wm_nvm_read_ich8(struct wm_softc *sc, in */ bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); - error = wm_get_swfwhw_semaphore(sc); - if (error) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); - return error; - } - for (i = 0; i < words; i++) { /* The NVM part needs a byte offset, hence * 2 */ act_offset = bank_offset + ((offset + i) * 2); - error = wm_read_ich8_word(sc, act_offset, &word); - if (error) { + rv = wm_read_ich8_word(sc, act_offset, &word); + if (rv) { aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n", __func__); break; @@ -11759,8 +12868,8 @@ wm_nvm_read_ich8(struct wm_softc *sc, in data[i] = word; } - wm_put_swfwhw_semaphore(sc); - return error; + sc->nvm.release(sc); + return rv; } /****************************************************************************** @@ -11775,7 +12884,7 @@ wm_nvm_read_ich8(struct wm_softc *sc, in static int wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data) { - int32_t error = 0; + int32_t rv = 0; uint32_t flash_bank = 0; uint32_t act_offset = 0; uint32_t bank_offset = 0; @@ -11785,14 +12894,17 @@ wm_nvm_read_spt(struct wm_softc *sc, int DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); + if (sc->nvm.acquire(sc) != 0) + return -1; + /* * We need to know which is the valid flash bank. In the event * that we didn't allocate eeprom_shadow_ram, we may not be - * managing flash_bank. So it cannot be trusted and needs + * managing flash_bank. So it cannot be trusted and needs * to be updated with each read. 
*/ - error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); - if (error) { + rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); + if (rv) { DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n", device_xname(sc->sc_dev))); flash_bank = 0; @@ -11804,19 +12916,12 @@ wm_nvm_read_spt(struct wm_softc *sc, int */ bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); - error = wm_get_swfwhw_semaphore(sc); - if (error) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); - return error; - } - for (i = 0; i < words; i++) { /* The NVM part needs a byte offset, hence * 2 */ act_offset = bank_offset + ((offset + i) * 2); /* but we must read dword aligned, so mask ... */ - error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword); - if (error) { + rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword); + if (rv) { aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n", __func__); break; @@ -11828,8 +12933,8 @@ wm_nvm_read_spt(struct wm_softc *sc, int data[i] = (uint16_t)((dword >> 16) & 0xFFFF); } - wm_put_swfwhw_semaphore(sc); - return error; + sc->nvm.release(sc); + return rv; } /* iNVM */ @@ -11837,7 +12942,7 @@ wm_nvm_read_spt(struct wm_softc *sc, int static int wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data) { - int32_t rv = 0; + int32_t rv = 0; uint32_t invm_dword; uint16_t i; uint8_t record_type, word_address; @@ -11873,10 +12978,13 @@ wm_nvm_read_invm(struct wm_softc *sc, in { int rv = 0; int i; - + DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); + if (sc->nvm.acquire(sc) != 0) + return -1; + for (i = 0; i < words; i++) { switch (offset + i) { case NVM_OFF_MACADDR: @@ -11931,104 +13039,12 @@ wm_nvm_read_invm(struct wm_softc *sc, in } } + sc->nvm.release(sc); return rv; } /* Lock, detecting NVM type, validate checksum, version and read */ -/* - * wm_nvm_acquire: - * - * Perform the EEPROM handshake required on some chips. 
- */ -static int -wm_nvm_acquire(struct wm_softc *sc) -{ - uint32_t reg; - int x; - int ret = 0; - - DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n", - device_xname(sc->sc_dev), __func__)); - - if (sc->sc_type >= WM_T_ICH8) { - ret = wm_get_nvm_ich8lan(sc); - } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) { - ret = wm_get_swfwhw_semaphore(sc); - } else if (sc->sc_flags & WM_F_LOCK_SWFW) { - /* This will also do wm_get_swsm_semaphore() if needed */ - ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM); - } else if (sc->sc_flags & WM_F_LOCK_SWSM) { - ret = wm_get_swsm_semaphore(sc); - } - - if (ret) { - aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", - __func__); - return 1; - } - - if (sc->sc_flags & WM_F_LOCK_EECD) { - reg = CSR_READ(sc, WMREG_EECD); - - /* Request EEPROM access. */ - reg |= EECD_EE_REQ; - CSR_WRITE(sc, WMREG_EECD, reg); - - /* ..and wait for it to be granted. */ - for (x = 0; x < 1000; x++) { - reg = CSR_READ(sc, WMREG_EECD); - if (reg & EECD_EE_GNT) - break; - delay(5); - } - if ((reg & EECD_EE_GNT) == 0) { - aprint_error_dev(sc->sc_dev, - "could not acquire EEPROM GNT\n"); - reg &= ~EECD_EE_REQ; - CSR_WRITE(sc, WMREG_EECD, reg); - if (sc->sc_flags & WM_F_LOCK_EXTCNF) - wm_put_swfwhw_semaphore(sc); - if (sc->sc_flags & WM_F_LOCK_SWFW) - wm_put_swfw_semaphore(sc, SWFW_EEP_SM); - else if (sc->sc_flags & WM_F_LOCK_SWSM) - wm_put_swsm_semaphore(sc); - return 1; - } - } - - return 0; -} - -/* - * wm_nvm_release: - * - * Release the EEPROM mutex. 
- */ -static void -wm_nvm_release(struct wm_softc *sc) -{ - uint32_t reg; - - DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n", - device_xname(sc->sc_dev), __func__)); - - if (sc->sc_flags & WM_F_LOCK_EECD) { - reg = CSR_READ(sc, WMREG_EECD); - reg &= ~EECD_EE_REQ; - CSR_WRITE(sc, WMREG_EECD, reg); - } - - if (sc->sc_type >= WM_T_ICH8) { - wm_put_nvm_ich8lan(sc); - } else if (sc->sc_flags & WM_F_LOCK_EXTCNF) - wm_put_swfwhw_semaphore(sc); - if (sc->sc_flags & WM_F_LOCK_SWFW) - wm_put_swfw_semaphore(sc, SWFW_EEP_SM); - else if (sc->sc_flags & WM_F_LOCK_SWSM) - wm_put_swsm_semaphore(sc); -} - static int wm_nvm_is_onboard_eeprom(struct wm_softc *sc) { @@ -12049,7 +13065,7 @@ wm_nvm_is_onboard_eeprom(struct wm_softc } static int -wm_nvm_get_flash_presence_i210(struct wm_softc *sc) +wm_nvm_flash_presence_i210(struct wm_softc *sc) { uint32_t eec; @@ -12082,7 +13098,8 @@ wm_nvm_validate_checksum(struct wm_softc return 0; #ifdef WM_DEBUG - if (sc->sc_type == WM_T_PCH_LPT) { + if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) + || (sc->sc_type == WM_T_PCH_CNP)) { csum_wordaddr = NVM_OFF_COMPAT; valid_checksum = NVM_COMPAT_VALID_CHECKSUM; } else { @@ -12096,12 +13113,11 @@ wm_nvm_validate_checksum(struct wm_softc || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { /* XXX PCH_SPT? */ wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data); - if ((eeprom_data & valid_checksum) == 0) { + if ((eeprom_data & valid_checksum) == 0) DPRINTF(WM_DEBUG_NVM, ("%s: NVM need to be updated (%04x != %04x)\n", device_xname(sc->sc_dev), eeprom_data, valid_checksum)); - } } if ((wm_debug & WM_DEBUG_NVM) != 0) { @@ -12164,6 +13180,7 @@ wm_nvm_version(struct wm_softc *sc) bool check_version = false; bool check_optionrom = false; bool have_build = false; + bool have_uid = true; /* * Version format: @@ -12180,9 +13197,27 @@ wm_nvm_version(struct wm_softc *sc) * 82572EI 0x5069 5.6.9? * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4) * 0x2013 2.1.3? - * 82583 0x10a0 1.10.0? 
(document says it's default vaule) + * 82583 0x10a0 1.10.0? (document says it's default value) + * ICH8+82567 0x0040 0.4.0? + * ICH9+82566 0x1040 1.4.0? + *ICH10+82567 0x0043 0.4.3? + * PCH+82577 0x00c1 0.12.1? + * PCH2+82579 0x00d3 0.13.3? + * 0x00d4 0.13.4? + * LPT+I218 0x0023 0.2.3? + * SPT+I219 0x0084 0.8.4? + * CNP+I219 0x0054 0.5.4? + */ + + /* + * XXX + * Qemu's e1000e emulation (82574L)'s SPI has only 64 words. + * I've never seen on real 82574 hardware with such small SPI ROM. */ - wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1); + if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1) + || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0)) + have_uid = false; + switch (sc->sc_type) { case WM_T_82571: case WM_T_82572: @@ -12192,18 +13227,32 @@ wm_nvm_version(struct wm_softc *sc) check_optionrom = true; have_build = true; break; + case WM_T_ICH8: + case WM_T_ICH9: + case WM_T_ICH10: + case WM_T_PCH: + case WM_T_PCH2: + case WM_T_PCH_LPT: + case WM_T_PCH_SPT: + case WM_T_PCH_CNP: + check_version = true; + have_build = true; + have_uid = false; + break; case WM_T_82575: case WM_T_82576: case WM_T_82580: - if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID) + if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID) check_version = true; break; case WM_T_I211: wm_nvm_version_invm(sc); + have_uid = false; goto printver; case WM_T_I210: - if (!wm_nvm_get_flash_presence_i210(sc)) { + if (!wm_nvm_flash_presence_i210(sc)) { wm_nvm_version_invm(sc); + have_uid = false; goto printver; } /* FALLTHROUGH */ @@ -12215,8 +13264,8 @@ wm_nvm_version(struct wm_softc *sc) default: return; } - if (check_version) { - wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data); + if (check_version + && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) { major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT; if (have_build || ((nvm_data & 0x0f00) != 0x0000)) { minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT; @@ -12238,14 +13287,18 @@ printver: aprint_verbose(".%d", build); } } - if 
(check_optionrom) { - wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off); + + /* Assume the Option ROM area is at avove NVM_SIZE */ + if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom + && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) { /* Option ROM Version */ if ((off != 0x0000) && (off != 0xffff)) { + int rv; + off += NVM_COMBO_VER_OFF; - wm_nvm_read(sc, off + 1, 1, &uid1); - wm_nvm_read(sc, off, 1, &uid0); - if ((uid0 != 0) && (uid0 != 0xffff) + rv = wm_nvm_read(sc, off + 1, 1, &uid1); + rv |= wm_nvm_read(sc, off, 1, &uid0); + if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff) && (uid1 != 0) && (uid1 != 0xffff)) { /* 16bits */ major = uid0 >> 8; @@ -12257,8 +13310,8 @@ printver: } } - wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0); - aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0); + if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0)) + aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0); } /* @@ -12275,27 +13328,10 @@ wm_nvm_read(struct wm_softc *sc, int wor device_xname(sc->sc_dev), __func__)); if (sc->sc_flags & WM_F_EEPROM_INVALID) - return 1; - - if (wm_nvm_acquire(sc)) - return 1; + return -1; - if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) - || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) - || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) - rv = wm_nvm_read_ich8(sc, word, wordcnt, data); - else if (sc->sc_type == WM_T_PCH_SPT) - rv = wm_nvm_read_spt(sc, word, wordcnt, data); - else if (sc->sc_flags & WM_F_EEPROM_INVM) - rv = wm_nvm_read_invm(sc, word, wordcnt, data); - else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR) - rv = wm_nvm_read_eerd(sc, word, wordcnt, data); - else if (sc->sc_flags & WM_F_EEPROM_SPI) - rv = wm_nvm_read_spi(sc, word, wordcnt, data); - else - rv = wm_nvm_read_uwire(sc, word, wordcnt, data); + rv = sc->nvm.read(sc, word, wordcnt, data); - wm_nvm_release(sc); return rv; } @@ -12322,12 +13358,100 @@ wm_put_null(struct wm_softc *sc) return; } -/* - 
* Get hardware semaphore. - * Same as e1000_get_hw_semaphore_generic() - */ static int -wm_get_swsm_semaphore(struct wm_softc *sc) +wm_get_eecd(struct wm_softc *sc) +{ + uint32_t reg; + int x; + + DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + + reg = CSR_READ(sc, WMREG_EECD); + + /* Request EEPROM access. */ + reg |= EECD_EE_REQ; + CSR_WRITE(sc, WMREG_EECD, reg); + + /* ..and wait for it to be granted. */ + for (x = 0; x < 1000; x++) { + reg = CSR_READ(sc, WMREG_EECD); + if (reg & EECD_EE_GNT) + break; + delay(5); + } + if ((reg & EECD_EE_GNT) == 0) { + aprint_error_dev(sc->sc_dev, + "could not acquire EEPROM GNT\n"); + reg &= ~EECD_EE_REQ; + CSR_WRITE(sc, WMREG_EECD, reg); + return -1; + } + + return 0; +} + +static void +wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd) +{ + + *eecd |= EECD_SK; + CSR_WRITE(sc, WMREG_EECD, *eecd); + CSR_WRITE_FLUSH(sc); + if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) + delay(1); + else + delay(50); +} + +static void +wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd) +{ + + *eecd &= ~EECD_SK; + CSR_WRITE(sc, WMREG_EECD, *eecd); + CSR_WRITE_FLUSH(sc); + if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) + delay(1); + else + delay(50); +} + +static void +wm_put_eecd(struct wm_softc *sc) +{ + uint32_t reg; + + DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + + /* Stop nvm */ + reg = CSR_READ(sc, WMREG_EECD); + if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) { + /* Pull CS high */ + reg |= EECD_CS; + wm_nvm_eec_clock_lower(sc, ®); + } else { + /* CS on Microwire is active-high */ + reg &= ~(EECD_CS | EECD_DI); + CSR_WRITE(sc, WMREG_EECD, reg); + wm_nvm_eec_clock_raise(sc, ®); + wm_nvm_eec_clock_lower(sc, ®); + } + + reg = CSR_READ(sc, WMREG_EECD); + reg &= ~EECD_EE_REQ; + CSR_WRITE(sc, WMREG_EECD, reg); + + return; +} + +/* + * Get hardware semaphore. 
+ * Same as e1000_get_hw_semaphore_generic() + */ +static int +wm_get_swsm_semaphore(struct wm_softc *sc) { int32_t timeout; uint32_t swsm; @@ -12336,6 +13460,7 @@ wm_get_swsm_semaphore(struct wm_softc *s device_xname(sc->sc_dev), __func__)); KASSERT(sc->sc_nvm_wordsize > 0); +retry: /* Get the SW semaphore. */ timeout = sc->sc_nvm_wordsize + 1; while (timeout) { @@ -12349,6 +13474,16 @@ wm_get_swsm_semaphore(struct wm_softc *s } if (timeout == 0) { + if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) { + /* + * In rare circumstances, the SW semaphore may already + * be held unintentionally. Clear the semaphore once + * before giving up. + */ + sc->sc_flags &= ~WM_F_WA_I210_CLSEM; + wm_put_swsm_semaphore(sc); + goto retry; + } aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n"); return 1; @@ -12398,7 +13533,7 @@ wm_put_swsm_semaphore(struct wm_softc *s /* * Get SW/FW semaphore. - * Same as e1000_acquire_swfw_sync_82575(). + * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}(). */ static int wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask) @@ -12406,35 +13541,37 @@ wm_get_swfw_semaphore(struct wm_softc *s uint32_t swfw_sync; uint32_t swmask = mask << SWFW_SOFT_SHIFT; uint32_t fwmask = mask << SWFW_FIRM_SHIFT; - int timeout = 200; + int timeout; DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); - KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0); - for (timeout = 0; timeout < 200; timeout++) { - if (sc->sc_flags & WM_F_LOCK_SWSM) { - if (wm_get_swsm_semaphore(sc)) { - aprint_error_dev(sc->sc_dev, - "%s: failed to get semaphore\n", - __func__); - return 1; - } + if (sc->sc_type == WM_T_80003) + timeout = 50; + else + timeout = 200; + + while (timeout) { + if (wm_get_swsm_semaphore(sc)) { + aprint_error_dev(sc->sc_dev, + "%s: failed to get semaphore\n", + __func__); + return 1; } swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); if ((swfw_sync & (swmask | fwmask)) == 0) { swfw_sync |= swmask; CSR_WRITE(sc, WMREG_SW_FW_SYNC, 
swfw_sync); - if (sc->sc_flags & WM_F_LOCK_SWSM) - wm_put_swsm_semaphore(sc); + wm_put_swsm_semaphore(sc); return 0; } - if (sc->sc_flags & WM_F_LOCK_SWSM) - wm_put_swsm_semaphore(sc); + wm_put_swsm_semaphore(sc); delay(5000); + timeout--; } - printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n", - device_xname(sc->sc_dev), mask, swfw_sync); + device_printf(sc->sc_dev, + "failed to get swfw semaphore mask 0x%x swfw 0x%x\n", + mask, swfw_sync); return 1; } @@ -12445,17 +13582,101 @@ wm_put_swfw_semaphore(struct wm_softc *s DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); - KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0); - if (sc->sc_flags & WM_F_LOCK_SWSM) { - while (wm_get_swsm_semaphore(sc) != 0) - continue; - } + while (wm_get_swsm_semaphore(sc) != 0) + continue; + swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); swfw_sync &= ~(mask << SWFW_SOFT_SHIFT); CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); - if (sc->sc_flags & WM_F_LOCK_SWSM) + + wm_put_swsm_semaphore(sc); +} + +static int +wm_get_nvm_80003(struct wm_softc *sc) +{ + int rv; + + DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + + if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) { + aprint_error_dev(sc->sc_dev, + "%s: failed to get semaphore(SWFW)\n", __func__); + return rv; + } + + if (((sc->sc_flags & WM_F_LOCK_EECD) != 0) + && (rv = wm_get_eecd(sc)) != 0) { + aprint_error_dev(sc->sc_dev, + "%s: failed to get semaphore(EECD)\n", __func__); + wm_put_swfw_semaphore(sc, SWFW_EEP_SM); + return rv; + } + + return 0; +} + +static void +wm_put_nvm_80003(struct wm_softc *sc) +{ + + DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + + if ((sc->sc_flags & WM_F_LOCK_EECD) != 0) + wm_put_eecd(sc); + wm_put_swfw_semaphore(sc, SWFW_EEP_SM); +} + +static int +wm_get_nvm_82571(struct wm_softc *sc) +{ + int rv; + + DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); 
+ + if ((rv = wm_get_swsm_semaphore(sc)) != 0) + return rv; + + switch (sc->sc_type) { + case WM_T_82573: + break; + default: + if ((sc->sc_flags & WM_F_LOCK_EECD) != 0) + rv = wm_get_eecd(sc); + break; + } + + if (rv != 0) { + aprint_error_dev(sc->sc_dev, + "%s: failed to get semaphore\n", + __func__); wm_put_swsm_semaphore(sc); + } + + return rv; +} + +static void +wm_put_nvm_82571(struct wm_softc *sc) +{ + + DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + + switch (sc->sc_type) { + case WM_T_82573: + break; + default: + if ((sc->sc_flags & WM_F_LOCK_EECD) != 0) + wm_put_eecd(sc); + break; + } + + wm_put_swsm_semaphore(sc); } static int @@ -12496,8 +13717,8 @@ wm_get_swfwhw_semaphore(struct wm_softc return 0; delay(5000); } - printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n", - device_xname(sc->sc_dev), ext_ctrl); + device_printf(sc->sc_dev, + "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl); mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ return 1; } @@ -12533,8 +13754,8 @@ wm_get_swflag_ich8lan(struct wm_softc *s delay(1000); } if (timeout >= WM_PHY_CFG_TIMEOUT) { - printf("%s: SW has already locked the resource\n", - device_xname(sc->sc_dev)); + device_printf(sc->sc_dev, + "SW has already locked the resource\n"); goto out; } @@ -12547,8 +13768,7 @@ wm_get_swflag_ich8lan(struct wm_softc *s delay(1000); } if (timeout >= 1000) { - printf("%s: failed to acquire semaphore\n", - device_xname(sc->sc_dev)); + device_printf(sc->sc_dev, "failed to acquire semaphore\n"); ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); goto out; @@ -12572,8 +13792,7 @@ wm_put_swflag_ich8lan(struct wm_softc *s ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); } else { - printf("%s: Semaphore unexpectedly released\n", - device_xname(sc->sc_dev)); + device_printf(sc->sc_dev, "Semaphore unexpectedly released\n"); } mutex_exit(sc->sc_ich_phymtx); 
@@ -12661,6 +13880,7 @@ wm_check_mng_mode(struct wm_softc *sc) case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: rv = wm_check_mng_mode_ich8lan(sc); break; case WM_T_82574: @@ -12674,7 +13894,7 @@ wm_check_mng_mode(struct wm_softc *sc) rv = wm_check_mng_mode_generic(sc); break; default: - /* noting to do */ + /* Noting to do */ rv = 0; break; } @@ -12780,6 +14000,7 @@ wm_phy_resetisblocked(struct wm_softc *s case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: do { reg = CSR_READ(sc, WMREG_FWSM); if ((reg & FWSM_RSPCIPHY) == 0) { @@ -12804,7 +14025,7 @@ wm_phy_resetisblocked(struct wm_softc *s return false; break; default: - /* no problem */ + /* No problem */ break; } @@ -12866,8 +14087,8 @@ wm_gate_hw_phy_config_ich8lan(struct wm_ CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); } -static void -wm_smbustopci(struct wm_softc *sc) +static int +wm_init_phy_workarounds_pchlan(struct wm_softc *sc) { uint32_t fwsm, reg; int rv = 0; @@ -12882,15 +14103,28 @@ wm_smbustopci(struct wm_softc *sc) wm_ulp_disable(sc); /* Acquire PHY semaphore */ - sc->phy.acquire(sc); + rv = sc->phy.acquire(sc); + if (rv != 0) { + DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n", + device_xname(sc->sc_dev), __func__)); + return -1; + } + /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is + * inaccessible and resetting the PHY is not blocked, toggle the + * LANPHYPC Value bit to force the interconnect to PCIe mode. + */ fwsm = CSR_READ(sc, WMREG_FWSM); switch (sc->sc_type) { case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: if (wm_phy_is_accessible_pchlan(sc)) break; + /* Before toggling LANPHYPC, see if PHY is accessible by + * forcing MAC to SMBus mode first. + */ reg = CSR_READ(sc, WMREG_CTRL_EXT); reg |= CTRL_EXT_FORCE_SMBUS; CSR_WRITE(sc, WMREG_CTRL_EXT, reg); @@ -12898,6 +14132,10 @@ wm_smbustopci(struct wm_softc *sc) /* XXX Isn't this required??? 
*/ CSR_WRITE_FLUSH(sc); #endif + /* Wait 50 milliseconds for MAC to finish any retries + * that it might be trying to perform from previous + * attempts to acknowledge any phy read requests. + */ delay(50 * 1000); /* FALLTHROUGH */ case WM_T_PCH2: @@ -12910,16 +14148,20 @@ wm_smbustopci(struct wm_softc *sc) break; if (wm_phy_resetisblocked(sc) == true) { - printf("XXX reset is blocked(3)\n"); + device_printf(sc->sc_dev, "XXX reset is blocked(3)\n"); break; } + /* Toggle LANPHYPC Value bit */ wm_toggle_lanphypc_pch_lpt(sc); if (sc->sc_type >= WM_T_PCH_LPT) { if (wm_phy_is_accessible_pchlan(sc) == true) break; + /* Toggling LANPHYPC brings the PHY out of SMBus mode + * so ensure that the MAC is also out of SMBus mode + */ reg = CSR_READ(sc, WMREG_CTRL_EXT); reg &= ~CTRL_EXT_FORCE_SMBUS; CSR_WRITE(sc, WMREG_CTRL_EXT, reg); @@ -12937,23 +14179,38 @@ wm_smbustopci(struct wm_softc *sc) sc->phy.release(sc); if (rv == 0) { + /* Check to see if able to reset PHY. Print error if not */ if (wm_phy_resetisblocked(sc)) { - printf("XXX reset is blocked(4)\n"); + device_printf(sc->sc_dev, "XXX reset is blocked(4)\n"); goto out; } - wm_reset_phy(sc); + + /* Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + if (wm_reset_phy(sc) != 0) + goto out; + + /* On a successful reset, possibly need to wait for the PHY + * to quiesce to an accessible state before returning control + * to the calling function. If the PHY does not quiesce, then + * return E1000E_BLK_PHY_RESET, as this is the condition that + * the PHY is in. 
+ */ if (wm_phy_resetisblocked(sc)) - printf("XXX reset is blocked(4)\n"); + device_printf(sc->sc_dev, "XXX reset is blocked(4)\n"); } out: - /* - * Ungate automatic PHY configuration by hardware on non-managed 82579 - */ + /* Ungate automatic PHY configuration on non-managed 82579 */ if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) { delay(10*1000); wm_gate_hw_phy_config_ich8lan(sc, false); } + + return 0; } static void @@ -12972,7 +14229,7 @@ wm_init_manageability(struct wm_softc *s /* Enable receiving management packets to the host */ if (sc->sc_type >= WM_T_82571) { manc |= MANC_EN_MNG2HOST; - manc2h |= MANC2H_PORT_623| MANC2H_PORT_624; + manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624; CSR_WRITE(sc, WMREG_MANC2H, manc2h); } @@ -13030,6 +14287,7 @@ wm_get_wakeup(struct wm_softc *sc) case WM_T_PCH2: case WM_T_PCH_LPT: case WM_T_PCH_SPT: + case WM_T_PCH_CNP: sc->sc_flags |= WM_F_HAS_AMT; sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; break; @@ -13041,18 +14299,6 @@ wm_get_wakeup(struct wm_softc *sc) if (wm_enable_mng_pass_thru(sc) != 0) sc->sc_flags |= WM_F_HAS_MANAGE; -#ifdef WM_DEBUG - printf("\n"); - if ((sc->sc_flags & WM_F_HAS_AMT) != 0) - printf("HAS_AMT,"); - if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) - printf("ARC_SUBSYS_VALID,"); - if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0) - printf("ASF_FIRMWARE_PRES,"); - if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0) - printf("HAS_MANAGE,"); - printf("\n"); -#endif /* * Note that the WOL flags is set after the resetting of the eeprom * stuff @@ -13063,11 +14309,12 @@ wm_get_wakeup(struct wm_softc *sc) * Unconfigure Ultra Low Power mode. * Only for I217 and newer (see below). 
*/ -static void +static int wm_ulp_disable(struct wm_softc *sc) { uint32_t reg; - int i = 0; + uint16_t phyreg; + int i = 0, rv = 0; DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); @@ -13077,7 +14324,7 @@ wm_ulp_disable(struct wm_softc *sc) || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V) || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2) || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2)) - return; + return 0; if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) { /* Request ME un-configure ULP mode in the PHY */ @@ -13089,8 +14336,9 @@ wm_ulp_disable(struct wm_softc *sc) /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */ while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) { if (i++ == 30) { - printf("%s timed out\n", __func__); - return; + device_printf(sc->sc_dev, "%s timed out\n", + __func__); + return -1; } delay(10 * 1000); } @@ -13098,42 +14346,56 @@ wm_ulp_disable(struct wm_softc *sc) reg &= ~H2ME_ENFORCE_SETTINGS; CSR_WRITE(sc, WMREG_H2ME, reg); - return; + return 0; } /* Acquire semaphore */ - sc->phy.acquire(sc); + rv = sc->phy.acquire(sc); + if (rv != 0) { + DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n", + device_xname(sc->sc_dev), __func__)); + return -1; + } /* Toggle LANPHYPC */ wm_toggle_lanphypc_pch_lpt(sc); /* Unforce SMBus mode in PHY */ - reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL); - if (reg == 0x0000 || reg == 0xffff) { + rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg); + if (rv != 0) { uint32_t reg2; - printf("%s: Force SMBus first.\n", __func__); + aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n", + __func__); reg2 = CSR_READ(sc, WMREG_CTRL_EXT); reg2 |= CTRL_EXT_FORCE_SMBUS; CSR_WRITE(sc, WMREG_CTRL_EXT, reg2); delay(50 * 1000); - reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL); + rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, + &phyreg); + if (rv != 0) + goto release; } - reg &= ~CV_SMB_CTRL_FORCE_SMBUS; - 
wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg); + phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS; + wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg); /* Unforce SMBus mode in MAC */ reg = CSR_READ(sc, WMREG_CTRL_EXT); reg &= ~CTRL_EXT_FORCE_SMBUS; CSR_WRITE(sc, WMREG_CTRL_EXT, reg); - reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL); - reg |= HV_PM_CTRL_K1_ENA; - wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg); + rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg); + if (rv != 0) + goto release; + phyreg |= HV_PM_CTRL_K1_ENA; + wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg); - reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1); - reg &= ~(I218_ULP_CONFIG1_IND + rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, + &phyreg); + if (rv != 0) + goto release; + phyreg &= ~(I218_ULP_CONFIG1_IND | I218_ULP_CONFIG1_STICKY_ULP | I218_ULP_CONFIG1_RESET_TO_SMBUS | I218_ULP_CONFIG1_WOL_HOST @@ -13141,41 +14403,105 @@ wm_ulp_disable(struct wm_softc *sc) | I218_ULP_CONFIG1_EN_ULP_LANPHYPC | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST | I218_ULP_CONFIG1_DIS_SMB_PERST); - wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg); - reg |= I218_ULP_CONFIG1_START; - wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg); + wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg); + phyreg |= I218_ULP_CONFIG1_START; + wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg); reg = CSR_READ(sc, WMREG_FEXTNVM7); reg &= ~FEXTNVM7_DIS_SMB_PERST; CSR_WRITE(sc, WMREG_FEXTNVM7, reg); +release: /* Release semaphore */ sc->phy.release(sc); wm_gmii_reset(sc); delay(50 * 1000); + + return rv; } /* WOL in the newer chipset interfaces (pchlan) */ -static void +static int wm_enable_phy_wakeup(struct wm_softc *sc) { -#if 0 - uint16_t preg; + device_t dev = sc->sc_dev; + uint32_t mreg, moff; + uint16_t wuce, wuc, wufc, preg; + int i, rv; + + KASSERT(sc->sc_type >= 
WM_T_PCH); /* Copy MAC RARs to PHY RARs */ + wm_copy_rx_addrs_to_phy_ich8lan(sc); + + /* Activate PHY wakeup */ + rv = sc->phy.acquire(sc); + if (rv != 0) { + device_printf(dev, "%s: failed to acquire semaphore\n", + __func__); + return rv; + } + + /* + * Enable access to PHY wakeup registers. + * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE. + */ + rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce); + if (rv != 0) { + device_printf(dev, + "%s: Could not enable PHY wakeup reg access\n", __func__); + goto release; + } /* Copy MAC MTA to PHY MTA */ + for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) { + uint16_t lo, hi; + + mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4)); + lo = (uint16_t)(mreg & 0xffff); + hi = (uint16_t)((mreg >> 16) & 0xffff); + wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true); + wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true); + } /* Configure PHY Rx Control register */ + wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true); + mreg = CSR_READ(sc, WMREG_RCTL); + if (mreg & RCTL_UPE) + preg |= BM_RCTL_UPE; + if (mreg & RCTL_MPE) + preg |= BM_RCTL_MPE; + preg &= ~(BM_RCTL_MO_MASK); + moff = __SHIFTOUT(mreg, RCTL_MO); + if (moff != 0) + preg |= moff << BM_RCTL_MO_SHIFT; + if (mreg & RCTL_BAM) + preg |= BM_RCTL_BAM; + if (mreg & RCTL_PMCF) + preg |= BM_RCTL_PMCF; + mreg = CSR_READ(sc, WMREG_CTRL); + if (mreg & CTRL_RFCE) + preg |= BM_RCTL_RFCE; + wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true); + wuc = WUC_APME | WUC_PME_EN; + wufc = WUFC_MAG; /* Enable PHY wakeup in MAC register */ + CSR_WRITE(sc, WMREG_WUC, + WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc); + CSR_WRITE(sc, WMREG_WUFC, wufc); /* Configure and enable PHY wakeup in PHY registers */ + wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true); + wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true); - /* Activate PHY wakeup */ + wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; + wm_disable_phy_wakeup_reg_access_bm(dev, &wuce); - /* XXX */ 
-#endif +release: + sc->phy.release(sc); + + return 0; } /* Power down workaround on D3 */ @@ -13215,55 +14541,174 @@ wm_igp3_phy_powerdown_workaround_ich8lan } } -static void -wm_enable_wakeup(struct wm_softc *sc) -{ - uint32_t reg, pmreg; - pcireg_t pmode; +/* + * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx + * @sc: pointer to the HW structure + * + * During S0 to Sx transition, it is possible the link remains at gig + * instead of negotiating to a lower speed. Before going to Sx, set + * 'Gig Disable' to force link speed negotiation to a lower speed based on + * the LPLU setting in the NVM or custom setting. For PCH and newer parts, + * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also + * needs to be written. + * Parts that support (and are linked to a partner which support) EEE in + * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power + * than 10Mbps w/o EEE. + */ +static void +wm_suspend_workarounds_ich8lan(struct wm_softc *sc) +{ + uint32_t phy_ctrl; + + phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL); + phy_ctrl |= PHY_CTRL_GBE_DIS; + + if (sc->sc_phytype == WMPHY_I217) { + uint16_t devid = sc->sc_pcidevid; + + if ((devid == PCI_PRODUCT_INTEL_I218_LM) || + (devid == PCI_PRODUCT_INTEL_I218_V) || + (devid == PCI_PRODUCT_INTEL_I218_LM3) || + (devid == PCI_PRODUCT_INTEL_I218_V3) || + (sc->sc_type >= WM_T_PCH_SPT)) + CSR_WRITE(sc, WMREG_FEXTNVM6, + CSR_READ(sc, WMREG_FEXTNVM6) + & ~FEXTNVM6_REQ_PLL_CLK); - DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", - device_xname(sc->sc_dev), __func__)); +#if 0 /* notyet */ + if (sc->phy.acquire(sc) != 0) + goto out; - if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, - &pmreg, NULL) == 0) - return; + /* XXX Do workaround for EEE */ - /* Advertise the wakeup capability */ - CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2) - | CTRL_SWDPIN(3)); - CSR_WRITE(sc, WMREG_WUC, WUC_APME); + /* + * For i217 Intel Rapid Start Technology support, + * when the 
system is going into Sx and no manageability engine + * is present, the driver must configure proxy to reset only on + * power good. LPI (Low Power Idle) state must also reset only + * on power good, as well as the MTA (Multicast table array). + * The SMBus release must also be disabled on LCD reset. + */ - /* ICH workaround */ - switch (sc->sc_type) { - case WM_T_ICH8: - case WM_T_ICH9: - case WM_T_ICH10: - case WM_T_PCH: - case WM_T_PCH2: - case WM_T_PCH_LPT: - case WM_T_PCH_SPT: - /* Disable gig during WOL */ - reg = CSR_READ(sc, WMREG_PHY_CTRL); - reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS; - CSR_WRITE(sc, WMREG_PHY_CTRL, reg); + /* + * Enable MTA to reset for Intel Rapid Start Technology + * Support + */ + + sc->phy.release(sc); +#endif + } +#if 0 +out: +#endif + CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl); + + if (sc->sc_type == WM_T_ICH8) + wm_gig_downshift_workaround_ich8lan(sc); + + if (sc->sc_type >= WM_T_PCH) { + wm_oem_bits_config_ich8lan(sc, false); + + /* Reset PHY to activate OEM bits on 82577/8 */ if (sc->sc_type == WM_T_PCH) - wm_gmii_reset(sc); + wm_reset_phy(sc); - /* Power down workaround */ - if (sc->sc_phytype == WMPHY_82577) { - struct mii_softc *child; + if (sc->phy.acquire(sc) != 0) + return; + wm_write_smbus_addr(sc); + sc->phy.release(sc); + } +} - /* Assume that the PHY is copper */ - child = LIST_FIRST(&sc->sc_mii.mii_phys); - if ((child != NULL) && (child->mii_mpd_rev <= 2)) - sc->sc_mii.mii_writereg(sc->sc_dev, 1, - (768 << 5) | 25, 0x0444); /* magic num */ - } - break; - default: - break; +/* + * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0 + * @sc: pointer to the HW structure + * + * During Sx to S0 transitions on non-managed devices or managed devices + * on which PHY resets are not blocked, if the PHY registers cannot be + * accessed properly by the s/w toggle the LANPHYPC value to power cycle + * the PHY. + * On i217, setup Intel Rapid Start Technology. 
+ */ +static int +wm_resume_workarounds_pchlan(struct wm_softc *sc) +{ + device_t dev = sc->sc_dev; + int rv; + + if (sc->sc_type < WM_T_PCH2) + return 0; + + rv = wm_init_phy_workarounds_pchlan(sc); + if (rv != 0) + return -1; + + /* For i217 Intel Rapid Start Technology support when the system + * is transitioning from Sx and no manageability engine is present + * configure SMBus to restore on reset, disable proxy, and enable + * the reset on MTA (Multicast table array). + */ + if (sc->sc_phytype == WMPHY_I217) { + uint16_t phy_reg; + + if (sc->phy.acquire(sc) != 0) + return -1; + + /* Clear Auto Enable LPI after link up */ + sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg); + phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg); + + if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) { + /* Restore clear on SMB if no manageability engine + * is present + */ + rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR, + &phy_reg); + if (rv != 0) + goto release; + phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; + sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg); + + /* Disable Proxy */ + sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0); + } + /* Enable reset on MTA */ + sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg); + if (rv != 0) + goto release; + phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; + sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg); + +release: + sc->phy.release(sc); + return rv; } + return 0; +} + +static void +wm_enable_wakeup(struct wm_softc *sc) +{ + uint32_t reg, pmreg; + pcireg_t pmode; + int rv = 0; + + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + + if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, + &pmreg, NULL) == 0) + return; + + if ((sc->sc_flags & WM_F_WOL) == 0) + goto pme; + + /* Advertise the wakeup capability */ + CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2) + | CTRL_SWDPIN(3)); + /* Keep the laser 
running on fiber adapters */ if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER) || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) { @@ -13272,64 +14717,162 @@ wm_enable_wakeup(struct wm_softc *sc) CSR_WRITE(sc, WMREG_CTRL_EXT, reg); } + if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) || + (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) || + (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) || + (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) + wm_suspend_workarounds_ich8lan(sc); + +#if 0 /* For the multicast packet */ reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG; -#if 0 /* for the multicast packet */ reg |= WUFC_MC; CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE); #endif - if (sc->sc_type >= WM_T_PCH) - wm_enable_phy_wakeup(sc); - else { - CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN); - CSR_WRITE(sc, WMREG_WUFC, reg); + if (sc->sc_type >= WM_T_PCH) { + rv = wm_enable_phy_wakeup(sc); + if (rv != 0) + goto pme; + } else { + /* Enable wakeup by the MAC */ + CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN); + CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG); } if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) - && (sc->sc_phytype == WMPHY_IGP_3)) - wm_igp3_phy_powerdown_workaround_ich8lan(sc); + && (sc->sc_phytype == WMPHY_IGP_3)) + wm_igp3_phy_powerdown_workaround_ich8lan(sc); +pme: /* Request PME */ pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR); -#if 0 - /* Disable WOL */ - pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN); -#else - /* For WOL */ - pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN; -#endif + if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) { + /* For WOL */ + pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN; + } else { + /* Disable WOL */ + pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN); + } pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode); } -/* LPLU */ - +/* 
Disable ASPM L0s and/or L1 for workaround */ static void -wm_lplu_d0_disable(struct wm_softc *sc) +wm_disable_aspm(struct wm_softc *sc) { - uint32_t reg; + pcireg_t reg, mask = 0; + unsigned const char *str = ""; - DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", - device_xname(sc->sc_dev), __func__)); + /* + * Only for PCIe device which has PCIe capability in the PCI config + * space. + */ + if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0)) + return; - reg = CSR_READ(sc, WMREG_PHY_CTRL); - reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU); - CSR_WRITE(sc, WMREG_PHY_CTRL, reg); + switch (sc->sc_type) { + case WM_T_82571: + case WM_T_82572: + /* + * 8257[12] Errata 13: Device Does Not Support PCIe Active + * State Power management L1 State (ASPM L1). + */ + mask = PCIE_LCSR_ASPM_L1; + str = "L1 is"; + break; + case WM_T_82573: + case WM_T_82574: + case WM_T_82583: + /* + * The 82573 disappears when PCIe ASPM L0s is enabled. + * + * The 82574 and 82583 does not support PCIe ASPM L0s with + * some chipset. The document of 82574 and 82583 says that + * disabling L0s with some specific chipset is sufficient, + * but we follow as of the Intel em driver does. + * + * References: + * Errata 8 of the Specification Update of i82573. + * Errata 20 of the Specification Update of i82574. + * Errata 9 of the Specification Update of i82583. 
+ */ + mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S; + str = "L0s and L1 are"; + break; + default: + return; + } + + reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, + sc->sc_pcixe_capoff + PCIE_LCSR); + reg &= ~mask; + pci_conf_write(sc->sc_pc, sc->sc_pcitag, + sc->sc_pcixe_capoff + PCIE_LCSR, reg); + + /* Print only in wm_attach() */ + if ((sc->sc_flags & WM_F_ATTACHED) == 0) + aprint_verbose_dev(sc->sc_dev, + "ASPM %s disabled to workaround the errata.\n", str); } +/* LPLU */ + static void -wm_lplu_d0_disable_pch(struct wm_softc *sc) +wm_lplu_d0_disable(struct wm_softc *sc) { + struct mii_data *mii = &sc->sc_mii; uint32_t reg; DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); - reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS); - reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU); - reg |= HV_OEM_BITS_ANEGNOW; - wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg); + if (sc->sc_phytype == WMPHY_IFE) + return; + + switch (sc->sc_type) { + case WM_T_82571: + case WM_T_82572: + case WM_T_82573: + case WM_T_82575: + case WM_T_82576: + reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT); + reg &= ~PMR_D0_LPLU; + mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg); + break; + case WM_T_82580: + case WM_T_I350: + case WM_T_I210: + case WM_T_I211: + reg = CSR_READ(sc, WMREG_PHPM); + reg &= ~PHPM_D0A_LPLU; + CSR_WRITE(sc, WMREG_PHPM, reg); + break; + case WM_T_82574: + case WM_T_82583: + case WM_T_ICH8: + case WM_T_ICH9: + case WM_T_ICH10: + reg = CSR_READ(sc, WMREG_PHY_CTRL); + reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU); + CSR_WRITE(sc, WMREG_PHY_CTRL, reg); + CSR_WRITE_FLUSH(sc); + break; + case WM_T_PCH: + case WM_T_PCH2: + case WM_T_PCH_LPT: + case WM_T_PCH_SPT: + case WM_T_PCH_CNP: + reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS); + reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU); + if (wm_phy_resetisblocked(sc) == false) + reg |= HV_OEM_BITS_ANEGNOW; + wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg); + 
break; + default: + break; + } } /* EEE */ @@ -13339,6 +14882,8 @@ wm_set_eee_i350(struct wm_softc *sc) { uint32_t ipcnfg, eeer; + KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER); + ipcnfg = CSR_READ(sc, WMREG_IPCNFG); eeer = CSR_READ(sc, WMREG_EEER); @@ -13368,31 +14913,31 @@ wm_set_eee_i350(struct wm_softc *sc) static void wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc) { -#if 0 - int miistatus, active, i; + struct mii_data *mii = &sc->sc_mii; + uint32_t status = CSR_READ(sc, WMREG_STATUS); + int i; int reg; - miistatus = sc->sc_mii.mii_media_status; + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); /* If the link is not up, do nothing */ - if ((miistatus & IFM_ACTIVE) == 0) + if ((status & STATUS_LU) == 0) return; - active = sc->sc_mii.mii_media_active; - /* Nothing to do if the link is other than 1Gbps */ - if (IFM_SUBTYPE(active) != IFM_1000_T) + if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000) return; for (i = 0; i < 10; i++) { /* read twice */ - reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); - reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); + reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); + reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG); if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0) goto out; /* GOOD! */ /* Reset the PHY */ - wm_gmii_reset(sc); + wm_reset_phy(sc); delay(5*1000); } @@ -13409,22 +14954,32 @@ wm_kmrn_lock_loss_workaround_ich8lan(str out: return; -#endif } -/* WOL from S5 stops working */ +/* + * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working + * @sc: pointer to the HW structure + * + * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), + * LPLU, Gig disable, MDIC PHY reset): + * 1) Set Kumeran Near-end loopback + * 2) Clear Kumeran Near-end loopback + * Should only be called for ICH8[m] devices with any 1G Phy. 
+ */ static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc) { - uint16_t kmrn_reg; + uint16_t kmreg; /* Only for igp3 */ if (sc->sc_phytype == WMPHY_IGP_3) { - kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG); - kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK; - wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg); - kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK; - wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg); + if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0) + return; + kmreg |= KUMCTRLSTA_DIAG_NELPBK; + if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0) + return; + kmreg &= ~KUMCTRLSTA_DIAG_NELPBK; + wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg); } } @@ -13433,7 +14988,7 @@ wm_gig_downshift_workaround_ich8lan(stru * XXX should be moved to new PHY driver? */ static void -wm_hv_phy_workaround_ich8lan(struct wm_softc *sc) +wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc) { DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", @@ -13443,7 +14998,7 @@ wm_hv_phy_workaround_ich8lan(struct wm_s if (sc->sc_phytype == WMPHY_82577) wm_set_mdio_slow_mode_hv(sc); - /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */ + /* XXX (PCH rev.2) && (82577 && (phy rev 2 or 3)) */ /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/ @@ -13476,17 +15031,149 @@ wm_hv_phy_workaround_ich8lan(struct wm_s wm_k1_gig_workaround_hv(sc, 1); } +/* + * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @sc: pointer to the HW structure + */ +static void +wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc) +{ + device_t dev = sc->sc_dev; + uint32_t mac_reg; + uint16_t i, wuce; + int count; + + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + + if (sc->phy.acquire(sc) != 0) + return; + if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0) + goto release; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ + count = wm_rar_count(sc); + for (i = 0; i < count; i++) { + uint16_t lo, hi; + mac_reg = 
CSR_READ(sc, WMREG_CORDOVA_RAL(i)); + lo = (uint16_t)(mac_reg & 0xffff); + hi = (uint16_t)((mac_reg >> 16) & 0xffff); + wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true); + wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true); + + mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i)); + lo = (uint16_t)(mac_reg & 0xffff); + hi = (uint16_t)((mac_reg & RAL_AV) >> 16); + wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true); + wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true); + } + + wm_disable_phy_wakeup_reg_access_bm(dev, &wuce); + +release: + sc->phy.release(sc); +} + +/* + * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + */ static void -wm_lv_phy_workaround_ich8lan(struct wm_softc *sc) +wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc) { DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); KASSERT(sc->sc_type == WM_T_PCH2); + /* Set MDIO slow mode before any other MDIO access */ wm_set_mdio_slow_mode_hv(sc); + + /* XXX Set MSE higher to enable link to stay up when noise is high */ + /* XXX Drop link after 5 times MSE threshold was reached */ } +/** + * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP + * @link: link up bool flag + * + * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications + * preventing further DMA write requests. Workaround the issue by disabling + * the de-assertion of the clock request when in 1Gpbs mode. + * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link + * speeds in order to avoid Tx hangs. 
+ **/ +static int +wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link) +{ + uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6); + uint32_t status = CSR_READ(sc, WMREG_STATUS); + uint32_t speed = __SHIFTOUT(status, STATUS_SPEED); + uint16_t phyreg; + + if (link && (speed == STATUS_SPEED_1000)) { + sc->phy.acquire(sc); + int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, + &phyreg); + if (rv != 0) + goto release; + rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, + phyreg & ~KUMCTRLSTA_K1_ENABLE); + if (rv != 0) + goto release; + delay(20); + CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK); + + rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, + &phyreg); +release: + sc->phy.release(sc); + return rv; + } + + fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK; + + struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys); + if (((child != NULL) && (child->mii_mpd_rev > 5)) + || !link + || ((speed == STATUS_SPEED_100) && (status & STATUS_FD))) + goto update_fextnvm6; + + phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL); + + /* Clear link status transmit timeout */ + phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; + if (speed == STATUS_SPEED_100) { + /* Set inband Tx timeout to 5x10us for 100Half */ + phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Do not extend the K1 entry latency for 100Half */ + fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } else { + /* Set inband Tx timeout to 50x10us for 10Full/Half */ + phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Extend the K1 entry latency for 10 Mbps */ + fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } + + wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg); + +update_fextnvm6: + CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6); + return 0; +} + +/* + * wm_k1_gig_workaround_hv - K1 Si workaround + * @sc: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall 
when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig + * If link is down, the function will restore the default K1 setting located + * in the NVM. + */ static int wm_k1_gig_workaround_hv(struct wm_softc *sc, int link) { @@ -13502,10 +15189,12 @@ wm_k1_gig_workaround_hv(struct wm_softc k1_enable = 0; /* Link stall fix for link up */ - wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100); + wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, + 0x0100); } else { /* Link stall fix for link down */ - wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100); + wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, + 0x4100); } wm_configure_k1_ich8lan(sc, k1_enable); @@ -13514,6 +15203,88 @@ wm_k1_gig_workaround_hv(struct wm_softc return 0; } +/* + * wm_k1_workaround_lv - K1 Si workaround + * @sc: pointer to the HW structure + * + * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps + * Disable K1 for 1000 and 100 speeds + */ +static int +wm_k1_workaround_lv(struct wm_softc *sc) +{ + uint32_t reg; + int phyreg; + + if (sc->sc_type != WM_T_PCH2) + return 0; + + /* Set K1 beacon duration based on 10Mbps speed */ + phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS); + + if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { + if (phyreg & + (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { + /* LV 1G/100 Packet drop issue wa */ + phyreg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL); + phyreg &= ~HV_PM_CTRL_K1_ENA; + wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL, phyreg); + } else { + /* For 10Mbps */ + reg = CSR_READ(sc, WMREG_FEXTNVM4); + reg &= ~FEXTNVM4_BEACON_DURATION; + reg |= FEXTNVM4_BEACON_DURATION_16US; + CSR_WRITE(sc, WMREG_FEXTNVM4, reg); + } + } + + return 0; +} + +/* + * wm_link_stall_workaround_hv - Si workaround + * @sc: pointer to the HW structure + * + * This function works around a Si 
bug where the link partner can get + * a link up indication before the PHY does. If small packets are sent + * by the link partner they can be placed in the packet buffer without + * being properly accounted for by the PHY and will stall preventing + * further packets from being received. The workaround is to clear the + * packet buffer after the PHY detects link up. + */ +static int +wm_link_stall_workaround_hv(struct wm_softc *sc) +{ + int phyreg; + + if (sc->sc_phytype != WMPHY_82578) + return 0; + + /* Do not apply workaround if in PHY loopback bit 14 set */ + phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR); + if ((phyreg & BMCR_LOOP) != 0) + return 0; + + /* Check if link is up and at 1Gbps */ + phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS); + phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED + | BM_CS_STATUS_SPEED_MASK; + if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED + | BM_CS_STATUS_SPEED_1000)) + return 0; + + delay(200 * 1000); /* XXX too big */ + + /* Flush the packets in the fifo buffer */ + wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL, + HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED); + wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL, + HV_MUX_DATA_CTRL_GEN_TO_MAC); + + return 0; +} + static void wm_set_mdio_slow_mode_hv(struct wm_softc *sc) { @@ -13524,20 +15295,35 @@ wm_set_mdio_slow_mode_hv(struct wm_softc reg | HV_KMRN_MDIO_SLOW); } +/* + * wm_configure_k1_ich8lan - Configure K1 power state + * @sc: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. 
+ */ static void wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable) { uint32_t ctrl, ctrl_ext, tmp; - uint16_t kmrn_reg; + uint16_t kmreg; + int rv; - kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG); + KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP); + + rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg); + if (rv != 0) + return; if (k1_enable) - kmrn_reg |= KUMCTRLSTA_K1_ENABLE; + kmreg |= KUMCTRLSTA_K1_ENABLE; else - kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE; + kmreg &= ~KUMCTRLSTA_K1_ENABLE; - wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg); + rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg); + if (rv != 0) + return; delay(20); @@ -13556,6 +15342,8 @@ wm_configure_k1_ich8lan(struct wm_softc CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); CSR_WRITE_FLUSH(sc); delay(20); + + return; } /* special case - for 82575 - need to do manual init ... */ @@ -13563,7 +15351,7 @@ static void wm_reset_init_script_82575(struct wm_softc *sc) { /* - * remark: this is untested code - we have no board without EEPROM + * Remark: this is untested code - we have no board without EEPROM * same setup as mentioned int the FreeBSD driver for the i82575 */ @@ -13596,6 +15384,8 @@ wm_reset_mdicnfg_82580(struct wm_softc * uint16_t nvmword; int rv; + if (sc->sc_type != WM_T_82580) + return; if ((sc->sc_flags & WM_F_SGMII) == 0) return; @@ -13620,26 +15410,33 @@ wm_reset_mdicnfg_82580(struct wm_softc * static bool wm_phy_is_accessible_pchlan(struct wm_softc *sc) { - int i; uint32_t reg; uint16_t id1, id2; + int i, rv; DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); + KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP); + id1 = id2 = 0xffff; for (i = 0; i < 2; i++) { - id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1); - if (MII_INVALIDID(id1)) + rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1, + &id1); + if ((rv != 0) || 
MII_INVALIDID(id1)) continue; - id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2); - if (MII_INVALIDID(id2)) + rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2, + &id2); + if ((rv != 0) || MII_INVALIDID(id2)) continue; break; } - if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) { + if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) goto out; - } + /* + * In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ if (sc->sc_type < WM_T_PCH_LPT) { sc->phy.release(sc); wm_set_mdio_slow_mode_hv(sc); @@ -13648,19 +15445,21 @@ wm_phy_is_accessible_pchlan(struct wm_so sc->phy.acquire(sc); } if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) { - printf("XXX return with false\n"); + device_printf(sc->sc_dev, "XXX return with false\n"); return false; } out: - if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) { + if (sc->sc_type >= WM_T_PCH_LPT) { /* Only unforce SMBus if ME is not active */ if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) { + uint16_t phyreg; + /* Unforce SMBus mode in PHY */ - reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, - CV_SMB_CTRL); - reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, + CV_SMB_CTRL, &phyreg); + phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS; wm_gmii_hv_writereg_locked(sc->sc_dev, 2, - CV_SMB_CTRL, reg); + CV_SMB_CTRL, phyreg); /* Unforce SMBus mode in MAC */ reg = CSR_READ(sc, WMREG_CTRL_EXT); @@ -13715,15 +15514,36 @@ wm_platform_pm_pch_lpt(struct wm_softc * | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND; uint32_t rxa; uint16_t scale = 0, lat_enc = 0; + int32_t obff_hwm = 0; int64_t lat_ns, value; - + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", device_xname(sc->sc_dev), __func__)); if (link) { - pcireg_t preg; uint16_t max_snoop, max_nosnoop, max_ltr_enc; + uint32_t status; + uint16_t speed; + pcireg_t preg; + status = CSR_READ(sc, WMREG_STATUS); + switch (__SHIFTOUT(status, STATUS_SPEED)) { + case STATUS_SPEED_10: + speed = 10; + break; + 
case STATUS_SPEED_100: + speed = 100; + break; + case STATUS_SPEED_1000: + speed = 1000; + break; + default: + device_printf(sc->sc_dev, "Unknown speed " + "(status = %08x)\n", status); + return -1; + } + + /* Rx Packet Buffer Allocation size (KB) */ rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK; /* @@ -13736,31 +15556,12 @@ wm_platform_pm_pch_lpt(struct wm_softc * * 1=2^5ns, 2=2^10ns,...5=2^25ns. */ lat_ns = ((int64_t)rxa * 1024 - - (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000; + (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu + + ETHER_HDR_LEN))) * 8 * 1000; if (lat_ns < 0) lat_ns = 0; - else { - uint32_t status; - uint16_t speed; - - status = CSR_READ(sc, WMREG_STATUS); - switch (__SHIFTOUT(status, STATUS_SPEED)) { - case STATUS_SPEED_10: - speed = 10; - break; - case STATUS_SPEED_100: - speed = 100; - break; - case STATUS_SPEED_1000: - speed = 1000; - break; - default: - printf("%s: Unknown speed (status = %08x)\n", - device_xname(sc->sc_dev), status); - return -1; - } + else lat_ns /= speed; - } value = lat_ns; while (value > LTRV_VALUE) { @@ -13768,12 +15569,13 @@ wm_platform_pm_pch_lpt(struct wm_softc * value = howmany(value, __BIT(5)); } if (scale > LTRV_SCALE_MAX) { - printf("%s: Invalid LTR latency scale %d\n", - device_xname(sc->sc_dev), scale); + device_printf(sc->sc_dev, + "Invalid LTR latency scale %d\n", scale); return -1; } lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value); + /* Determine the maximum latency tolerated by the platform */ preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_LTR_CAP_LPT); max_snoop = preg & 0xffff; @@ -13783,12 +15585,39 @@ wm_platform_pm_pch_lpt(struct wm_softc * if (lat_enc > max_ltr_enc) { lat_enc = max_ltr_enc; + lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL) + * PCI_LTR_SCALETONS( + __SHIFTOUT(lat_enc, + PCI_LTR_MAXSNOOPLAT_SCALE)); + } + + if (lat_ns) { + lat_ns *= speed * 1000; + lat_ns /= 8; + lat_ns /= 1000000000; + obff_hwm = (int32_t)(rxa - lat_ns); + } + if ((obff_hwm < 0) || 
(obff_hwm > SVT_OFF_HWM)) { + device_printf(sc->sc_dev, "Invalid high water mark %d" + "(rxa = %d, lat_ns = %d)\n", + obff_hwm, (int32_t)rxa, (int32_t)lat_ns); + return -1; } } /* Snoop and No-Snoop latencies the same */ reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP); CSR_WRITE(sc, WMREG_LTRV, reg); + /* Set OBFF high water mark */ + reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM; + reg |= obff_hwm; + CSR_WRITE(sc, WMREG_SVT, reg); + + /* Enable OBFF */ + reg = CSR_READ(sc, WMREG_SVCR); + reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT; + CSR_WRITE(sc, WMREG_SVCR, reg); + return 0; } @@ -13808,6 +15637,11 @@ wm_pll_workaround_i210(struct wm_softc * bool wa_done = false; int i; + /* Get Power Management cap offset */ + if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, + &pmreg, NULL) == 0) + return; + /* Save WUC and MDICNFG registers */ wuc = CSR_READ(sc, WMREG_WUC); mdicnfg = CSR_READ(sc, WMREG_MDICNFG); @@ -13819,10 +15653,6 @@ wm_pll_workaround_i210(struct wm_softc * nvmword = INVM_DEFAULT_AL; tmp_nvmword = nvmword | INVM_PLL_WO_VAL; - /* Get Power Management cap offset */ - if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, - &pmreg, NULL) == 0) - return; for (i = 0; i < WM_MAX_PLL_TRIES; i++) { phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1, GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG); @@ -13866,3 +15696,22 @@ wm_pll_workaround_i210(struct wm_softc * if (wa_done) aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n"); } + +static void +wm_legacy_irq_quirk_spt(struct wm_softc *sc) +{ + uint32_t reg; + + DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n", + device_xname(sc->sc_dev), __func__)); + KASSERT((sc->sc_type == WM_T_PCH_SPT) + || (sc->sc_type == WM_T_PCH_CNP)); + + reg = CSR_READ(sc, WMREG_FEXTNVM7); + reg |= FEXTNVM7_SIDE_CLK_UNGATE; + CSR_WRITE(sc, WMREG_FEXTNVM7, reg); + + reg = CSR_READ(sc, WMREG_FEXTNVM9); + reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS; + CSR_WRITE(sc, WMREG_FEXTNVM9, reg); +}