/*	$NetBSD: i82596.c,v 1.18 2007/10/19 11:59:53 ad Exp $	*/

/*
 * Copyright (c) 2003 Jochen Kunz.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Jochen Kunz may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL JOCHEN KUNZ
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Intel i82596CA and i82596DX/SX 10 Mbit/s Ethernet chips.
 *
 * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old
 * i82586 ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596
 * to i82586 compatibility mode.
 *
 * Documentation about these chips can be found at
 *
 *	http://developer.intel.com/design/network/datashts/290218.htm
 *	http://developer.intel.com/design/network/datashts/290219.htm
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.18 2007/10/19 11:59:53 ad Exp $");

/* autoconfig and device stuff */
#include <sys/param.h>
#include <sys/device.h>
#include <sys/conf.h>
#include "locators.h"
#include "ioconf.h"

/* bus_space / bus_dma etc. */
#include <sys/bus.h>
#include <sys/intr.h>

/* general system data and functions */
#include <sys/systm.h>
#include <sys/ioctl.h>

/* tsleep / sleep / wakeup */
#include <sys/proc.h>
/* hz for above */
#include <sys/kernel.h>

/* network stuff */
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <sys/socket.h>
#include <sys/mbuf.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ic/i82596reg.h>
#include <dev/ic/i82596var.h>

/* Supported chip variants */
const char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };

/* media change and status callback */
static int iee_mediachange(struct ifnet *);
static void iee_mediastatus(struct ifnet *, struct ifmediareq *);

/* interface routines to upper protocols */
static void iee_start(struct ifnet *);			/* initiate output */
static int iee_ioctl(struct ifnet *, u_long, void *);	/* ioctl routine */
static int iee_init(struct ifnet *);			/* init routine */
static void iee_stop(struct ifnet *, int);		/* stop routine */
static void iee_watchdog(struct ifnet *);		/* timer routine */

/* internal helper functions */
static void iee_cb_setup(struct iee_softc *, uint32_t);

/*
 * Things a MD frontend has to provide:
 *
 * The functions via function pointers in the softc:
 *	int (*sc_iee_cmd)(struct iee_softc *sc, uint32_t cmd);
 *	int (*sc_iee_reset)(struct iee_softc *sc);
 *	void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
 *	int (*sc_mediachange)(struct ifnet *);
 *
 * sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
 *	to the SCB cmd word and issuing a Channel Attention.
 * sc_iee_reset(): initiate a reset, supply the address of the SCP to the
 *	chip, wait for the chip to initialize and ACK interrupts that
 *	this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
 * These functions must carefully bus_dmamap_sync() all data they have
 * touched!
 *
 * sc_mediastatus() and sc_mediachange() are just MD hooks to the
 * corresponding MI functions.  The MD frontend may set these pointers to
 * NULL when they are not needed.
 *
 * sc->sc_type has to be set to I82596_UNKNOWN, I82596_DX or I82596_CA.
 * This is used only to print the correct chip type at attach time.  The
 * MI backend doesn't distinguish different chip types when programming
 * the chip.
 *
 * sc->sc_flags has to be set to 0 on little endian hardware and to
 * IEE_NEED_SWAP on big endian hardware, when endianness conversion is not
 * done by the bus attachment.  Usually you need to set IEE_NEED_SWAP
 * when IEE_SYSBUS_BE is set in the sysbus byte.
 *
 * sc->sc_cl_align must be set to 1 or to the cache line size.  When set
 * to 1 no special alignment of DMA descriptors is done.  If
 * sc->sc_cl_align != 1 it forces alignment of the data structures in the
 * shared memory to a multiple of sc->sc_cl_align.  This is needed on
 * archs like hp700 that have non-DMA-I/O-coherent caches and are unable
 * to map the shared memory uncacheable.  (At least pre-PA7100LC CPUs are
 * unable to map memory uncacheable.)
 *
 * sc->sc_cl_align MUST BE INITIALIZED BEFORE THE FOLLOWING MACROS ARE USED:
 * SC_* IEE_*_SZ IEE_*_OFF IEE_SHMEM_MAX (shell style glob(3) pattern)
 *
 * The MD frontend has to allocate a piece of DMA memory of at least
 * IEE_SHMEM_MAX bytes.  All communication with the chip is done via
 * this shared memory.  If possible map this memory non-cacheable on
 * archs with non-DMA-I/O-coherent caches.  The base of the memory needs
 * to be aligned to an even address if sc->sc_cl_align == 1 and aligned
 * to a cache line if sc->sc_cl_align != 1.
 *
 * An interrupt with iee_intr() as handler must be established.
 *
 * Call void iee_attach(struct iee_softc *sc, uint8_t *ether_address,
 * int *media, int nmedia, int defmedia); when everything is set up.  The
 * first parameter is a pointer to the MI softc, ether_address is an array
 * that contains the Ethernet address.  media is an array of the media
 * types provided by the hardware.  The members of this array are supplied
 * to ifmedia_add() in sequence.  nmedia is the count of elements in media.
 * defmedia is the default media that is set via ifmedia_set().
 * nmedia and defmedia are ignored when media == NULL.
 *
 * The MD frontend may call iee_detach() to detach the device.
 *
 * See sys/arch/hp700/gsc/if_iee_gsc.c for an example, and the sketch
 * below this comment.
 */
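
/*
 * A minimal sketch of an MD frontend attachment under the contract above.
 * The "foo" bus glue, its interrupt hookup and the source of the Ethernet
 * address are hypothetical and all error handling is omitted;
 * if_iee_gsc.c shows the real thing.
 *
 *	static void
 *	iee_foo_attach(struct device *parent, struct device *self, void *aux)
 *	{
 *		struct iee_softc *sc = (struct iee_softc *)self;
 *		bus_dma_segment_t seg;
 *		uint8_t eth_addr[ETHER_ADDR_LEN];
 *		int rsegs;
 *
 *		sc->sc_type = I82596_CA;
 *		sc->sc_flags = IEE_NEED_SWAP;
 *		sc->sc_cl_align = 32;
 *
 * sc_cl_align is set before any SC_* / IEE_*_SZ macro is used, as
 * required above.  Then allocate and map IEE_SHMEM_MAX bytes of shared
 * memory:
 *
 *		bus_dmamem_alloc(sc->sc_dmat, IEE_SHMEM_MAX, PAGE_SIZE, 0,
 *		    &seg, 1, &rsegs, BUS_DMA_NOWAIT);
 *		bus_dmamem_map(sc->sc_dmat, &seg, rsegs, IEE_SHMEM_MAX,
 *		    (void **)&sc->sc_shmem_addr, BUS_DMA_NOWAIT);
 *		bus_dmamap_create(sc->sc_dmat, IEE_SHMEM_MAX, 1,
 *		    IEE_SHMEM_MAX, 0, BUS_DMA_NOWAIT, &sc->sc_shmem_map);
 *		bus_dmamap_load(sc->sc_dmat, sc->sc_shmem_map,
 *		    sc->sc_shmem_addr, IEE_SHMEM_MAX, NULL, BUS_DMA_NOWAIT);
 *		memset(sc->sc_shmem_addr, 0, IEE_SHMEM_MAX);
 *
 *		sc->sc_iee_cmd = iee_foo_cmd;
 *		sc->sc_iee_reset = iee_foo_reset;
 *		sc->sc_mediastatus = NULL;
 *		sc->sc_mediachange = NULL;
 *
 *		foo_intr_establish(parent, IPL_NET, iee_intr, sc);
 *		foo_read_mac(aux, eth_addr);
 *		iee_attach(sc, eth_addr, NULL, 0, 0);
 *	}
 */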


/*
 * How frame reception is done:
 * Each Receive Frame Descriptor has one associated Receive Buffer
 * Descriptor.  Each RBD points to the data area of an mbuf cluster.  The
 * RFDs are linked together in a circular list.  sc->sc_rx_done is the
 * count of RFDs in the list already processed / the number of the RFD
 * that has to be checked for a new frame first at the next RX interrupt.
 * Upon successful reception of a frame the mbuf cluster is handed to
 * upper protocol layers, a new mbuf cluster is allocated and the RFD /
 * RBD are reinitialized accordingly.
 *
 * When an RFD list overrun occurs the whole RFD and RBD lists are
 * reinitialized and frame reception is started again.
 */
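/*
 * Buffer bookkeeping sketch (names local to this comment): the RBD most
 * recently handed back to the chip carries the IEE_RBD_EL (end of list)
 * bit.  When RFD n completes, the EL bit moves from RBD n-1 to RBD n:
 *
 *	prev = SC_RBD((n + IEE_NRFD - 1) % IEE_NRFD);
 *	prev->rbd_size &= ~IEE_RBD_EL;
 *	SC_RBD(n)->rbd_size = IEE_RBD_EL | buffer_length;
 *
 * so the chip always sees the ring terminated just behind the RFD the
 * driver will look at next and can never wrap past an unprocessed frame.
 */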
int
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct iee_rfd *rfd;
	struct iee_rbd *rbd;
	bus_dmamap_t rx_map;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	int scb_status;
	int scb_cmd;
	int n, col;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
		return(1);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB->scb_status;
	scb_cmd = SC_SCB->scb_cmd;
	rfd = SC_RFD(sc->sc_rx_done);
	while ((rfd->rfd_status & IEE_RFD_C) != 0) {
		/* At least one packet was received. */
		rbd = SC_RBD(sc->sc_rx_done);
		rx_map = sc->sc_rx_map[sc->sc_rx_done];
		rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
		SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
		    &= ~IEE_RBD_EL;
		if ((rfd->rfd_status & IEE_RFD_OK) == 0
		    || (rbd->rbd_count & IEE_RBD_EOF) == 0
		    || (rbd->rbd_count & IEE_RBD_F) == 0) {
			/* Receive error, skip frame and reuse buffer. */
			rfd->rfd_status = 0;
			rbd->rbd_count = 0;
			rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
			printf("%s: iee_intr: receive error %d, rfd_status="
			    "0x%.4x, rbd_count=0x%.4x\n", sc->sc_dev.dv_xname,
			    ++sc->sc_rx_err, rfd->rfd_status, rbd->rbd_count);
			sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
			continue;
		}
		rfd->rfd_status = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    rx_mbuf->m_ext.ext_size, BUS_DMASYNC_POSTREAD);
		rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
		    rbd->rbd_count & IEE_RBD_COUNT;
		rx_mbuf->m_pkthdr.rcvif = ifp;
		MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
		if (new_mbuf == NULL) {
			printf("%s: iee_intr: can't allocate mbuf\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(new_mbuf, M_DONTWAIT);
		if ((new_mbuf->m_flags & M_EXT) == 0) {
			printf("%s: iee_intr: can't alloc mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			m_freem(new_mbuf);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, rx_map);
		if (bus_dmamap_load(sc->sc_dmat, rx_map,
		    new_mbuf->m_ext.ext_buf, new_mbuf->m_ext.ext_size,
		    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
			panic("%s: iee_intr: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    new_mbuf->m_ext.ext_size, BUS_DMASYNC_PREREAD);
#if NBPFILTER > 0
		if (ifp->if_bpf != 0)
			bpf_mtap(ifp->if_bpf, rx_mbuf);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, rx_mbuf);
		ifp->if_ipackets++;
		sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
		rbd->rbd_count = 0;
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		rbd->rbd_rb_addr = rx_map->dm_segs[0].ds_addr;
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
		rfd = SC_RFD(sc->sc_rx_done);
	}
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			SC_RFD(n)->rfd_cmd = IEE_RFD_SF;
			SC_RFD(n)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
			    + IEE_RFD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
			    + IEE_RBD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			SC_RBD(n)->rbd_rb_addr =
			    sc->sc_rx_map[n]->dm_segs[0].ds_addr;
		}
		SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
		sc->sc_rx_done = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_RFD_OFF,
		    IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    sc->sc_dev.dv_xname);
	}

	if (sc->sc_next_cb != 0
	    && (SC_CB(sc->sc_next_cb - 1)->cb_status & IEE_CB_C) != 0) {
		/* CMD list finished */
		ifp->if_timer = 0;
		if (sc->sc_next_tbd != 0) {
			/* A TX CMD list finished, cleanup */
			for (n = 0 ; n < sc->sc_next_cb ; n++) {
				m_freem(sc->sc_tx_mbuf[n]);
				sc->sc_tx_mbuf[n] = NULL;
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_tx_map[n]);
				if ((SC_CB(n)->cb_status & IEE_CB_COL) != 0 &&
				    (SC_CB(n)->cb_status & IEE_CB_MAXCOL) == 0)
					col = 16;
				else
					col = SC_CB(n)->cb_status
					    & IEE_CB_MAXCOL;
				sc->sc_tx_col += col;
				if ((SC_CB(n)->cb_status & IEE_CB_OK) != 0) {
					ifp->if_opackets++;
					ifp->if_collisions += col;
				}
			}
			sc->sc_next_tbd = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		for (n = 0 ; n < sc->sc_next_cb ; n++) {
			/* Check if a CMD failed, but ignore TX errors. */
			if ((SC_CB(n)->cb_cmd & IEE_CB_CMD) != IEE_CB_CMD_TR
			    && ((SC_CB(n)->cb_status & IEE_CB_OK) == 0))
				printf("%s: iee_intr: scb_status=0x%x "
				    "scb_cmd=0x%x failed command %d: "
				    "cb_status[%d]=0x%.4x cb_cmd[%d]=0x%.4x\n",
				    sc->sc_dev.dv_xname, scb_status, scb_cmd,
				    ++sc->sc_cmd_err, n, SC_CB(n)->cb_status,
				    n, SC_CB(n)->cb_cmd);
		}
		sc->sc_next_cb = 0;
		if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
		} else
			/* Try to get deferred packets going. */
			iee_start(ifp);
	}
	if (IEE_SWAP(SC_SCB->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP(SC_SCB->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_crc_err);
	}
	if (IEE_SWAP(SC_SCB->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP(SC_SCB->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_align_err);
	}
	if (IEE_SWAP(SC_SCB->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP(SC_SCB->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_resource_err);
	}
	if (IEE_SWAP(SC_SCB->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP(SC_SCB->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_overrun_err);
	}
	if (IEE_SWAP(SC_SCB->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP(SC_SCB->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_rcvcdt_err);
	}
	if (IEE_SWAP(SC_SCB->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP(SC_SCB->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_short_fr_err);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return(1);
}


/*
 * How Command Block List Processing is done.
 *
 * A running CBL is never manipulated.  If there is a CBL already running,
 * further CMDs are deferred until the current list is done.  A new list is
 * set up when the old one has finished.
 * This eases programming.  To manipulate a running CBL it would be
 * necessary to suspend the Command Unit to avoid race conditions.  After
 * a suspend is sent we would have to wait for an interrupt that ACKs the
 * suspend; only then could we manipulate the CBL and resume operation.
 * I am not sure that this is more effective than the current, much
 * simpler approach. => KISS
 * See i82596CA data sheet page 26.
 *
 * A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).
 *
 * A CBL may consist of TX CMDs, and _only_ TX CMDs.
 * A TX CBL is running or on the way to be set up when
 * ((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).
 *
 * A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
 * non-TX CMDs.
 *
 * This follows mostly from the way an Ethernet driver works and from the
 * rule that running CBLs are never manipulated.  When if_start() is
 * called there will be TX CMDs enqueued, so we have a running CBL and
 * other CMDs from e.g. if_ioctl() will be deferred, and vice versa.
 *
 * The Multicast Setup Command is special.  A MCS needs more space than
 * a single CB has.  The actual space requirement depends on the length of
 * the multicast list.  So we always defer MCS until other CBLs are
 * finished, then we set up a CONF CMD in the first CB.  The CONF CMD is
 * needed to turn ALLMULTI on the hardware on or off.  The MCS is the 2nd
 * CB and may use all the remaining space in the CBL and the Transmit
 * Buffer Descriptor List.  (Therefore CBL and TBDL must be contiguous in
 * physical and virtual memory.  This is guaranteed through the
 * definitions of the list offsets in i82596reg.h and because only a
 * single DMA segment is used for all lists.)  When ALLMULTI is enabled
 * via the CONF CMD, the MCS is run with a multicast list length of 0,
 * thus disabling the multicast filter.
 * A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0)
 */
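/*
 * Deferred multicast setup, end to end (a sketch of the existing flow,
 * not new code): while a TX CBL is outstanding, a multicast change from
 * iee_ioctl() only sets the flag; the next "CMD list finished" interrupt
 * builds and starts the CONF + MCS list:
 *
 *	iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL | IEE_CB_I);
 *		(sc->sc_next_cb != 0 -> sc->sc_flags |= IEE_WANT_MCAST)
 *	... the TX CBL completes, iee_intr() sees IEE_CB_C ...
 *	iee_cb_setup(sc, IEE_CB_CMD_MCS | ...);	(now builds CONF + MCS)
 *	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
 */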
void
iee_cb_setup(struct iee_softc *sc, uint32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, IEE_CB_SZ);
	cb->cb_cmd = cmd;
	switch (cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
		break;
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy(__UNVOLATILE(cb->cb_ind_addr), CLLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
		break;
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy(__UNVOLATILE(cb->cb_cf), sc->sc_cf, sc->sc_cf[0]
		    & IEE_CF_0_CNT_M);
		break;
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
			return;
		}
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promisc mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			return;
		}
		/* Leave room for a CONF CMD to en/dis-able ALLMULTI mode */
		cb = SC_CB(sc->sc_next_cb + 1);
		cb->cb_cmd = cmd;
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
			    * ETHER_ADDR_LEN + 2 * IEE_CB_SZ
			    > IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ) {
				cb->cb_mcast.mc_size = 0;
				break;
			}
			memcpy(__UNVOLATILE(&cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN]),
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		}
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
		} else {
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than IEE_CB_SZ bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    IEE_CB_OFF, IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ,
			    BUS_DMASYNC_PREWRITE);
		}
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
		break;
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr = IEE_PHYS_SHMEM(IEE_TBD_OFF
		    + IEE_TBD_SZ * sc->sc_next_tbd);
		cb->cb_cmd |= IEE_CB_SF;	/* Always use Flexible Mode. */
		break;
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
		break;
	case IEE_CB_CMD_DUMP:	/* Dump */
		break;
	case IEE_CB_CMD_DIAG:	/* Diagnose */
		break;
	default:
		/* can't happen */
		break;
	}
	cb->cb_link_addr = IEE_PHYS_SHMEM(IEE_CB_OFF + IEE_CB_SZ *
	    (sc->sc_next_cb + 1));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF
	    + IEE_CB_SZ * sc->sc_next_cb, IEE_CB_SZ, BUS_DMASYNC_PREWRITE);
	sc->sc_next_cb++;
	ifp->if_timer = 5;
	return;
}


void
iee_attach(struct iee_softc *sc, uint8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int n;

	/* Set pointer to Intermediate System Configuration Pointer. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_SCP->scp_iscp_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_ISCP_OFF));
	/* Set pointer to System Control Block. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_ISCP->iscp_scb_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_SCB_OFF));
	/* Set pointer to Receive Frame Area. (physical address) */
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	/* Set pointer to Command Block. (physical address) */
	SC_SCB->scb_cmd_blk_addr = IEE_PHYS_SHMEM(IEE_CB_OFF);
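	/*
	 * The chip walks this chain at init time (sketch; all physical
	 * addresses inside the shared memory segment):
	 *
	 *	SCP -> ISCP -> SCB -> RFA (RFDs + RBDs)
	 *	                  \-> CBL (CBs + TBDs)
	 */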

	ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
	if (media != NULL) {
		for (n = 0 ; n < nmedia ; n++)
			ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, defmedia);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
	}

	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = iee_start;		/* initiate output routine */
	ifp->if_ioctl = iee_ioctl;		/* ioctl routine */
	ifp->if_init = iee_init;		/* init routine */
	ifp->if_stop = iee_stop;		/* stop routine */
	ifp->if_watchdog = iee_watchdog;	/* timer routine */
	IFQ_SET_READY(&ifp->if_snd);
	/* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, eth_addr);

	aprint_normal(": Intel 82596%s address %s\n",
	    i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));

	for (n = 0 ; n < IEE_NCB ; n++)
		sc->sc_tx_map[n] = NULL;
	for (n = 0 ; n < IEE_NRFD ; n++) {
		sc->sc_rx_mbuf[n] = NULL;
		sc->sc_rx_map[n] = NULL;
	}
	sc->sc_tx_timeout = 0;
	sc->sc_setup_timeout = 0;
	(sc->sc_iee_reset)(sc);
	return;
}



void
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	ether_ifdetach(ifp);
	if_detach(ifp);
	return;
}


/* media change and status callback */
int
iee_mediachange(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediachange != NULL)
		return ((sc->sc_mediachange)(ifp));
	return(0);
}



void
iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediastatus != NULL)
		return ((sc->sc_mediastatus)(ifp, ifmreq));
	return;
}


/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueuing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBDs than we support.
			 * Copy the packet into an mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    sc->sc_dev.dv_xname);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			SC_TBD(sc->sc_next_tbd + n)->tbd_tb_addr =
			    sc->sc_tx_map[t]->dm_segs[n].ds_addr;
			SC_TBD(sc->sc_next_tbd + n)->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			SC_TBD(sc->sc_next_tbd + n)->tbd_link_addr =
			    IEE_PHYS_SHMEM(IEE_TBD_OFF + IEE_TBD_SZ
			    * (sc->sc_next_tbd + n + 1));
		}
		SC_TBD(sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
#if NBPFILTER > 0
		/* Pass packet to bpf if someone listens. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
#endif
	}
	if (t == 0)
		/* No packets got set up for TX. */
		return;
	if (t == IEE_NCB)
		ifp->if_flags |= IFF_OACTIVE;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_SZ,
	    IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
	return;
}


/* ioctl routine */
int
iee_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iee_softc *sc = ifp->if_softc;
	int s;
	int err;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, (struct ifreq *)data,
		    &sc->sc_ifmedia, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		if (err == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING) {
				iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S |
				    IEE_CB_EL | IEE_CB_I);
				if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
					(*sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
			}
			err = 0;
		}
		break;
	}
	splx(s);
	return(err);
}


/* init routine */
int
iee_init(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	int r;
	int t;
	int n;
	int err;

	sc->sc_next_cb = 0;
	sc->sc_next_tbd = 0;
	sc->sc_flags &= ~IEE_WANT_MCAST;
	sc->sc_rx_done = 0;
	SC_SCB->scb_crc_err = 0;
	SC_SCB->scb_align_err = 0;
	SC_SCB->scb_resource_err = 0;
	SC_SCB->scb_overrun_err = 0;
	SC_SCB->scb_rcvcdt_err = 0;
	SC_SCB->scb_short_fr_err = 0;
	sc->sc_crc_err = 0;
	sc->sc_align_err = 0;
	sc->sc_resource_err = 0;
	sc->sc_overrun_err = 0;
	sc->sc_rcvcdt_err = 0;
	sc->sc_short_fr_err = 0;
	sc->sc_tx_col = 0;
	sc->sc_rx_err = 0;
	sc->sc_cmd_err = 0;
	/* Create Transmit DMA maps. */
	for (t = 0 ; t < IEE_NCB ; t++) {
		if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_map[t]) != 0) {
			printf("%s: iee_init: can't create TX DMA map\n",
			    sc->sc_dev.dv_xname);
			for (n = 0 ; n < t ; n++)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_tx_map[n]);
			return(ENOBUFS);
		}
	}
	/* Initialize Receive Frame and Receive Buffer Descriptors */
	err = 0;
	memset(SC_RFD(0), 0, IEE_RFD_LIST_SZ);
	memset(SC_RBD(0), 0, IEE_RBD_LIST_SZ);
	for (r = 0 ; r < IEE_NRFD ; r++) {
		SC_RFD(r)->rfd_cmd = IEE_RFD_SF;
		SC_RFD(r)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
		    + IEE_RFD_SZ * ((r + 1) % IEE_NRFD));

		SC_RBD(r)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
		    + IEE_RBD_SZ * ((r + 1) % IEE_NRFD));
		if (sc->sc_rx_mbuf[r] == NULL) {
			MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
			if (sc->sc_rx_mbuf[r] == NULL) {
				printf("%s: iee_init: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				err = 1;
				break;
			}
			MCLAIM(sc->sc_rx_mbuf[r],
			    &sc->sc_ethercom.ec_rx_mowner);
			MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
			if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
				printf("%s: iee_init: can't allocate mbuf"
				    " cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_rx_mbuf[r]);
				err = 1;
				break;
			}
		}
		if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_map[r]) != 0) {
			printf("%s: iee_init: can't create RX "
			    "DMA map\n", sc->sc_dev.dv_xname);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_map[r],
		    sc->sc_rx_mbuf[r]->m_ext.ext_buf,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
			printf("%s: iee_init: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, BUS_DMASYNC_PREREAD);
		SC_RBD(r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
		SC_RBD(r)->rbd_rb_addr = sc->sc_rx_map[r]->dm_segs[0].ds_addr;
	}
	SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
	if (err != 0) {
		for (n = 0 ; n < r ; n++) {
			m_freem(sc->sc_rx_mbuf[n]);
			sc->sc_rx_mbuf[n] = NULL;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
			sc->sc_rx_map[n] = NULL;
		}
		for (n = 0 ; n < t ; n++) {
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
			sc->sc_tx_map[n] = NULL;
		}
		return(ENOBUFS);
	}

	(sc->sc_iee_reset)(sc);
	iee_cb_setup(sc, IEE_CB_CMD_IAS);
	sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
	sc->sc_cf[1] = IEE_CF_1_DEF;
	sc->sc_cf[2] = IEE_CF_2_DEF;
	sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
	    | IEE_CF_3_PREAMLEN_DEF;
	sc->sc_cf[4] = IEE_CF_4_DEF;
	sc->sc_cf[5] = IEE_CF_5_DEF;
	sc->sc_cf[6] = IEE_CF_6_DEF;
	sc->sc_cf[7] = IEE_CF_7_DEF;
	sc->sc_cf[8] = IEE_CF_8_DEF;
	sc->sc_cf[9] = IEE_CF_9_DEF;
	sc->sc_cf[10] = IEE_CF_10_DEF;
	sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
	sc->sc_cf[12] = IEE_CF_12_DEF;
	sc->sc_cf[13] = IEE_CF_13_DEF;
	iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);

	/* Mark the interface as running and ready to RX/TX packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	return(0);
}


/* stop routine */
void
iee_stop(struct ifnet *ifp, int disable)
{
	struct iee_softc *sc = ifp->if_softc;
	int n;

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_flags |= IFF_OACTIVE;
	ifp->if_timer = 0;
	/* Reset the chip to get it quiet. */
	(sc->sc_iee_reset)(ifp->if_softc);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
	/* Release any dynamically allocated resources. */
	for (n = 0 ; n < IEE_NCB ; n++) {
		if (sc->sc_tx_map[n] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
		sc->sc_tx_map[n] = NULL;
	}
	for (n = 0 ; n < IEE_NRFD ; n++) {
		if (sc->sc_rx_mbuf[n] != NULL)
			m_freem(sc->sc_rx_mbuf[n]);
		sc->sc_rx_mbuf[n] = NULL;
		if (sc->sc_rx_map[n] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
		}
		sc->sc_rx_map[n] = NULL;
	}
	return;
}



/* timer routine */
void
iee_watchdog(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	(sc->sc_iee_reset)(sc);
	if (sc->sc_next_tbd != 0)
		printf("%s: iee_watchdog: transmit timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_tx_timeout);
	else
		printf("%s: iee_watchdog: setup timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_setup_timeout);
	iee_init(ifp);
	return;
}