/*	$NetBSD: i82596.c,v 1.20 2009/05/05 15:47:35 tsutsui Exp $	*/

/*
 * Copyright (c) 2003 Jochen Kunz.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Jochen Kunz may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JOCHEN KUNZ
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Intel i82596CA and i82596DX/SX 10MBit/s Ethernet chips.
 *
 * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old i82586
 * ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596 to
 * i82586 compatibility mode.
 *
 * Documentation about these chips can be found at
 *
 *	http://developer.intel.com/design/network/datashts/290218.htm
 *	http://developer.intel.com/design/network/datashts/290219.htm
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.20 2009/05/05 15:47:35 tsutsui Exp $");

/* autoconfig and device stuff */
#include <sys/param.h>
#include <sys/device.h>
#include <sys/conf.h>
#include "locators.h"
#include "ioconf.h"

/* bus_space / bus_dma etc. */
#include <sys/bus.h>
#include <sys/intr.h>

/* general system data and functions */
#include <sys/systm.h>
#include <sys/ioctl.h>

/* tsleep / sleep / wakeup */
#include <sys/proc.h>
/* hz for above */
#include <sys/kernel.h>

/* network stuff */
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <sys/socket.h>
#include <sys/mbuf.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ic/i82596reg.h>
#include <dev/ic/i82596var.h>

/* Supported chip variants */
const char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };

/* media change and status callback */
static int iee_mediachange(struct ifnet *);
static void iee_mediastatus(struct ifnet *, struct ifmediareq *);

/* interface routines to upper protocols */
static void iee_start(struct ifnet *);			/* initiate output */
static int iee_ioctl(struct ifnet *, u_long, void *);	/* ioctl routine */
static int iee_init(struct ifnet *);			/* init routine */
static void iee_stop(struct ifnet *, int);		/* stop routine */
static void iee_watchdog(struct ifnet *);		/* timer routine */

/* internal helper functions */
static void iee_cb_setup(struct iee_softc *, uint32_t);

/*
 * Things a MD frontend has to provide:
 *
 * The functions via function pointers in the softc:
 *	int (*sc_iee_cmd)(struct iee_softc *sc, uint32_t cmd);
 *	int (*sc_iee_reset)(struct iee_softc *sc);
 *	void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
 *	int (*sc_mediachange)(struct ifnet *);
 *
 * sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
 *	to the SCB cmd word and issuing a Channel Attention.
 * sc_iee_reset(): initiate a reset, supply the address of the SCP to the
 *	chip, wait for the chip to initialize and ACK interrupts that
 *	this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
 * These functions must carefully bus_dmamap_sync() all data they have
 * touched!
 *
 * sc_mediastatus() and sc_mediachange() are just MD hooks to the
 * corresponding MI functions. The MD frontend may set these pointers to
 * NULL when they are not needed.
 *
 * sc->sc_type has to be set to I82596_UNKNOWN, I82596_DX or I82596_CA.
 * This is used only for printing the correct chip type at attach time.
 * The MI backend doesn't distinguish different chip types when programming
 * the chip.
 *
 * IEE_NEED_SWAP in sc->sc_flags has to be cleared on little endian hardware
 * and set on big endian hardware, when endianness conversion is not done
 * by the bus attachment but by the i82596 chip itself.
 * Usually you need to set IEE_NEED_SWAP on big endian machines
 * where the hardware (the LE/~BE pin) is configured as BE mode.
 *
 * If the chip is configured as BE mode, all 8 bit (byte) and 16 bit (word)
 * entities can be written in big endian. But the Rev A chip doesn't support
 * 32 bit (dword) entities with big endian byte ordering, so we have to
 * treat all 32 bit (dword) entities as two 16 bit big endian entities.
 * Rev B and C chips support big endian byte ordering for 32 bit entities,
 * and this new feature is enabled by IEE_SYSBUS_BE in the sysbus byte.
 *
 * With the IEE_SYSBUS_BE feature, all 32 bit address pointers are
 * treated as true 32 bit entities, but the SCB absolute address and the
 * statistical counters are still treated as two 16 bit big endian entities,
 * so we always have to swap high and low words for these entities.
 * IEE_SWAP32() should be used for the SCB address and statistical counters,
 * and IEE_SWAPA32() should be used for other 32 bit pointers in the shmem.
 * (A sketch of plausible definitions follows this comment.)
 *
 * The IEE_REV_A flag must be set in sc->sc_flags if the IEE_SYSBUS_BE
 * feature is disabled even on big endian machines for the old Rev A chip
 * in the backend.
 *
 * sc->sc_cl_align must be set to 1 or to the cache line size. When set to
 * 1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
 * it forces alignment of the data structures in the shared memory to a
 * multiple of sc->sc_cl_align. This is needed on archs like hp700 that have
 * non-DMA-I/O-coherent caches and are unable to map the shared memory
 * uncacheable. (At least pre-PA7100LC CPUs are unable to map memory
 * uncacheable.)
 *
 * sc->sc_cl_align MUST BE INITIALIZED BEFORE THE FOLLOWING MACROS ARE USED:
 * SC_* IEE_*_SZ IEE_*_OFF IEE_SHMEM_MAX (shell style glob(3) pattern)
 *
 * The MD frontend also has to set sc->sc_cl_align and sc->sc_sysbus
 * before calling the MI iee_attach(), which allocates and sets up the
 * shared DMA memory. All communication with the chip is done via this
 * shared memory. The memory is mapped with BUS_DMA_COHERENT so it will be
 * uncached if possible for archs with non-DMA-I/O-coherent caches.
 * The base of the memory needs to be aligned to an even address
 * if sc->sc_cl_align == 1 and to a cache line if sc->sc_cl_align != 1.
 *
 * An interrupt with iee_intr() as handler must be established.
 *
 * Call void iee_attach(struct iee_softc *sc, uint8_t *ether_address,
 * int *media, int nmedia, int defmedia); when everything is set up. The
 * first parameter is a pointer to the MI softc, ether_address is an array
 * that contains the ethernet address. media is an array of the media types
 * provided by the hardware. The members of this array are supplied to
 * ifmedia_add() in sequence. nmedia is the count of elements in media.
 * defmedia is the default media that is set via ifmedia_set().
 * nmedia and defmedia are ignored when media == NULL.
 *
 * The MD frontend may call iee_detach() to detach the device.
 *
 * See sys/arch/hp700/gsc/if_iee_gsc.c for an example.
 */
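
/*
 * Illustrative sketch only: one plausible shape for the swap macros
 * described above. The real definitions live in dev/ic/i82596var.h and
 * may differ in detail; IEE_NEED_SWAP and IEE_REV_A are the sc_flags
 * bits discussed in the comment above, and sc is assumed in scope.
 *
 *	#define IEE_SWAP32(x)	((sc->sc_flags & IEE_NEED_SWAP) ?	\
 *				    (((x) << 16) | ((x) >> 16)) : (x))
 *	#define IEE_SWAPA32(x)	((sc->sc_flags & IEE_REV_A) ?		\
 *				    IEE_SWAP32(x) : (x))
 *
 * That is: the SCB address and the statistical counters are always word
 * swapped on swapped (BE) buses, while the other 32 bit pointers are only
 * word swapped when a Rev A chip lacks the IEE_SYSBUS_BE feature.
 */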


/*
 * How frame reception is done:
 * Each Receive Frame Descriptor has one associated Receive Buffer
 * Descriptor. Each RBD points to the data area of an mbuf cluster. The
 * RFDs are linked together in a circular list. sc->sc_rx_done is the count
 * of RFDs in the list already processed / the number of the RFD that has
 * to be checked for a new frame first at the next RX interrupt. Upon
 * successful reception of a frame the mbuf cluster is handed to the upper
 * protocol layers, a new mbuf cluster is allocated and the RFD / RBD are
 * reinitialized accordingly.
 *
 * When an RFD list overrun occurs, the whole RFD and RBD lists are
 * reinitialized and frame reception is started again.
 */
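
/*
 * Sketch of the ring bookkeeping done below (simplified illustration,
 * not compiled code; buflen stands for the loaded buffer length): the
 * IEE_RBD_EL (end of list) mark always trails the reception point, so
 * the chip never wraps into a buffer the driver has not recycled yet:
 *
 *	rbd[(done + IEE_NRFD - 1) % IEE_NRFD].rbd_size &= ~IEE_RBD_EL;
 *	...hand the old cluster up and load a fresh one...
 *	rbd[done].rbd_size = IEE_RBD_EL | buflen;
 *	done = (done + 1) % IEE_NRFD;
 */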
int
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct iee_rfd *rfd;
	struct iee_rbd *rbd;
	bus_dmamap_t rx_map;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	int scb_status;
	int scb_cmd;
	int n, col;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
		return 1;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB->scb_status;
	scb_cmd = SC_SCB->scb_cmd;
	rfd = SC_RFD(sc->sc_rx_done);
	while ((rfd->rfd_status & IEE_RFD_C) != 0) {
		/* At least one packet was received. */
		rbd = SC_RBD(sc->sc_rx_done);
		rx_map = sc->sc_rx_map[sc->sc_rx_done];
		rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
		SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
		    &= ~IEE_RBD_EL;
		if ((rfd->rfd_status & IEE_RFD_OK) == 0
		    || (rbd->rbd_count & IEE_RBD_EOF) == 0
		    || (rbd->rbd_count & IEE_RBD_F) == 0) {
			/* Receive error, skip frame and reuse buffer. */
			rfd->rfd_status = 0;
			rbd->rbd_count = 0;
			rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
			printf("%s: iee_intr: receive error %d, rfd_status="
			    "0x%.4x, rbd_count=0x%.4x\n",
			    device_xname(sc->sc_dev),
			    ++sc->sc_rx_err, rfd->rfd_status, rbd->rbd_count);
			sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
			continue;
		}
		rfd->rfd_status = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    rx_mbuf->m_ext.ext_size, BUS_DMASYNC_POSTREAD);
		rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
		    rbd->rbd_count & IEE_RBD_COUNT;
		rx_mbuf->m_pkthdr.rcvif = ifp;
		MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
		if (new_mbuf == NULL) {
			printf("%s: iee_intr: can't allocate mbuf\n",
			    device_xname(sc->sc_dev));
			break;
		}
		MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(new_mbuf, M_DONTWAIT);
		if ((new_mbuf->m_flags & M_EXT) == 0) {
			printf("%s: iee_intr: can't alloc mbuf cluster\n",
			    device_xname(sc->sc_dev));
			m_freem(new_mbuf);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, rx_map);
		if (bus_dmamap_load(sc->sc_dmat, rx_map,
		    new_mbuf->m_ext.ext_buf, new_mbuf->m_ext.ext_size,
		    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
			panic("%s: iee_intr: can't load RX DMA map\n",
			    device_xname(sc->sc_dev));
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    new_mbuf->m_ext.ext_size, BUS_DMASYNC_PREREAD);
#if NBPFILTER > 0
		if (ifp->if_bpf != 0)
			bpf_mtap(ifp->if_bpf, rx_mbuf);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, rx_mbuf);
		ifp->if_ipackets++;
		sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
		rbd->rbd_count = 0;
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		rbd->rbd_rb_addr = IEE_SWAPA32(rx_map->dm_segs[0].ds_addr);
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
		rfd = SC_RFD(sc->sc_rx_done);
	}
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			SC_RFD(n)->rfd_cmd = IEE_RFD_SF;
			SC_RFD(n)->rfd_link_addr =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_RFD_OFF
			    + IEE_RFD_SZ * ((n + 1) % IEE_NRFD)));
			SC_RBD(n)->rbd_next_rbd =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_RBD_OFF
			    + IEE_RBD_SZ * ((n + 1) % IEE_NRFD)));
			SC_RBD(n)->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			SC_RBD(n)->rbd_rb_addr =
			    IEE_SWAPA32(sc->sc_rx_map[n]->dm_segs[0].ds_addr);
		}
		SC_RFD(0)->rfd_rbd_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_RBD_OFF));
		sc->sc_rx_done = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_RFD_OFF,
		    IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    device_xname(sc->sc_dev));
	}

	if (sc->sc_next_cb != 0
	    && (SC_CB(sc->sc_next_cb - 1)->cb_status & IEE_CB_C) != 0) {
		/* CMD list finished */
		ifp->if_timer = 0;
		if (sc->sc_next_tbd != 0) {
			/* A TX CMD list finished, clean up. */
			for (n = 0 ; n < sc->sc_next_cb ; n++) {
				m_freem(sc->sc_tx_mbuf[n]);
				sc->sc_tx_mbuf[n] = NULL;
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_tx_map[n]);
				if ((SC_CB(n)->cb_status & IEE_CB_COL) != 0 &&
				    (SC_CB(n)->cb_status & IEE_CB_MAXCOL) == 0)
					col = 16;
				else
					col = SC_CB(n)->cb_status
					    & IEE_CB_MAXCOL;
				sc->sc_tx_col += col;
				if ((SC_CB(n)->cb_status & IEE_CB_OK) != 0) {
					ifp->if_opackets++;
					ifp->if_collisions += col;
				}
			}
			sc->sc_next_tbd = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		for (n = 0 ; n < sc->sc_next_cb ; n++) {
			/* Check if a CMD failed, but ignore TX errors. */
			if ((SC_CB(n)->cb_cmd & IEE_CB_CMD) != IEE_CB_CMD_TR
			    && ((SC_CB(n)->cb_status & IEE_CB_OK) == 0))
				printf("%s: iee_intr: scb_status=0x%x "
				    "scb_cmd=0x%x failed command %d: "
				    "cb_status[%d]=0x%.4x cb_cmd[%d]=0x%.4x\n",
				    device_xname(sc->sc_dev),
				    scb_status, scb_cmd,
				    ++sc->sc_cmd_err, n, SC_CB(n)->cb_status,
				    n, SC_CB(n)->cb_cmd);
		}
		sc->sc_next_cb = 0;
		if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
		} else
			/* Try to get deferred packets going. */
			iee_start(ifp);
	}
	if (IEE_SWAP32(SC_SCB->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP32(SC_SCB->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", device_xname(sc->sc_dev),
		    sc->sc_crc_err);
	}
	if (IEE_SWAP32(SC_SCB->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP32(SC_SCB->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_align_err);
	}
	if (IEE_SWAP32(SC_SCB->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP32(SC_SCB->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_resource_err);
	}
	if (IEE_SWAP32(SC_SCB->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP32(SC_SCB->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_overrun_err);
	}
	if (IEE_SWAP32(SC_SCB->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP32(SC_SCB->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_rcvcdt_err);
	}
	if (IEE_SWAP32(SC_SCB->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP32(SC_SCB->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_short_fr_err);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return 1;
}



/*
 * How Command Block List Processing is done.
 *
 * A running CBL is never manipulated. If there is a CBL already running,
 * further CMDs are deferred until the current list is done. A new list is
 * set up when the old one has finished.
 * This eases programming. To manipulate a running CBL it is necessary to
 * suspend the Command Unit to avoid race conditions. After a suspend
 * is sent we have to wait for an interrupt that ACKs the suspend. Then
 * we can manipulate the CBL and resume operation. I am not sure that this
 * is more effective than the current, much simpler approach. => KISS
 * See i82596CA data sheet page 26.
 *
 * A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).
 *
 * A CBL may consist of TX CMDs, and _only_ TX CMDs.
 * A TX CBL is running or on the way to be set up when
 * ((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).
 *
 * A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
 * non-TX CMDs.
 *
 * This comes mostly from the way an Ethernet driver works and
 * because running CBLs are not manipulated when they are on the way. If
 * if_start() is called there will be TX CMDs enqueued so we have a running
 * CBL and other CMDs from e.g. if_ioctl() will be deferred and vice versa.
 *
 * The Multicast Setup Command is special. An MCS needs more space than
 * a single CB has. The actual space requirement depends on the length of
 * the multicast list. So we always defer MCS until other CBLs are finished,
 * then we set up a CONF CMD in the first CB. The CONF CMD is needed to
 * turn ALLMULTI on the hardware on or off. The MCS is the 2nd CB and may
 * use all the remaining space in the CBL and the Transmit Buffer Descriptor
 * List. (Therefore CBL and TBDL must be contiguous in physical and virtual
 * memory. This is guaranteed through the definitions of the list offsets
 * in i82596reg.h and because a single DMA segment is used for all
 * lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
 * a multicast list length of 0, thus disabling the multicast filter.
 * A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0).
 */
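
/*
 * Usage sketch (illustration): this is how the MI code kicks off a
 * non-TX command, mirroring what iee_ioctl() does for a multicast
 * filter change; if a CBL is already running, the MCS case defers
 * itself via IEE_WANT_MCAST and is restarted from iee_intr():
 *
 *	iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL | IEE_CB_I);
 *	if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
 *		(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
 */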
void
iee_cb_setup(struct iee_softc *sc, uint32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, IEE_CB_SZ);
	cb->cb_cmd = cmd;
	switch (cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
		break;
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy(__UNVOLATILE(cb->cb_ind_addr), CLLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
		break;
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy(__UNVOLATILE(cb->cb_cf), sc->sc_cf, sc->sc_cf[0]
		    & IEE_CF_0_CNT_M);
		break;
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
			return;
		}
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promisc mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			return;
		}
		/* Leave room for a CONF CMD to en/dis-able ALLMULTI mode */
		cb = SC_CB(sc->sc_next_cb + 1);
		cb->cb_cmd = cmd;
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
			    * ETHER_ADDR_LEN + 2 * IEE_CB_SZ
			    > IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ) {
				cb->cb_mcast.mc_size = 0;
				break;
			}
			memcpy(__UNVOLATILE(&cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN]),
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		}
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
		} else {
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than IEE_CB_SZ bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    IEE_CB_OFF, IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ,
			    BUS_DMASYNC_PREWRITE);
		}
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
		break;
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_TBD_OFF
		    + IEE_TBD_SZ * sc->sc_next_tbd));
		cb->cb_cmd |= IEE_CB_SF;	/* Always use Flexible Mode. */
		break;
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
		break;
	case IEE_CB_CMD_DUMP:	/* Dump */
		break;
	case IEE_CB_CMD_DIAG:	/* Diagnose */
		break;
	default:
		/* can't happen */
		break;
	}
	cb->cb_link_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_CB_OFF + IEE_CB_SZ *
	    (sc->sc_next_cb + 1)));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF
	    + IEE_CB_SZ * sc->sc_next_cb, IEE_CB_SZ, BUS_DMASYNC_PREWRITE);
	sc->sc_next_cb++;
	ifp->if_timer = 5;
	return;
}



void
iee_attach(struct iee_softc *sc, uint8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int n;

	KASSERT(sc->sc_cl_align > 0);

	/* allocate memory for shared DMA descriptors */
	if (bus_dmamem_alloc(sc->sc_dmat, IEE_SHMEM_MAX, PAGE_SIZE, 0,
	    &sc->sc_dma_segs, 1, &sc->sc_dma_rsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error(": iee_attach: can't allocate %d bytes of "
		    "DMA memory\n", (int)IEE_SHMEM_MAX);
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_segs, sc->sc_dma_rsegs,
	    IEE_SHMEM_MAX, (void **)&sc->sc_shmem_addr,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT) != 0) {
		aprint_error(": iee_attach: can't map DMA memory\n");
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
		    sc->sc_dma_rsegs);
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, IEE_SHMEM_MAX, sc->sc_dma_rsegs,
	    IEE_SHMEM_MAX, 0, BUS_DMA_NOWAIT, &sc->sc_shmem_map) != 0) {
		aprint_error(": iee_attach: can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr,
		    IEE_SHMEM_MAX);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
		    sc->sc_dma_rsegs);
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_shmem_map, sc->sc_shmem_addr,
	    IEE_SHMEM_MAX, NULL, BUS_DMA_NOWAIT) != 0) {
		aprint_error(": iee_attach: can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_shmem_map);
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr,
		    IEE_SHMEM_MAX);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
		    sc->sc_dma_rsegs);
		return;
	}
	memset(sc->sc_shmem_addr, 0, IEE_SHMEM_MAX);
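
	/*
	 * All further communication with the chip goes through this one
	 * DMA segment. Sketch of the regions it contains (offsets come
	 * from the IEE_*_OFF macros in i82596var.h; exact order and
	 * padding depend on sc_cl_align, so this is an illustration,
	 * not a normative layout):
	 *
	 *	SCP	system configuration pointer
	 *	ISCP	intermediate system configuration pointer
	 *	SCB	system control block (incl. statistical counters)
	 *	RFDs	IEE_NRFD receive frame descriptors
	 *	RBDs	IEE_NRFD receive buffer descriptors
	 *	CBs	IEE_NCB command blocks
	 *	TBDs	IEE_NTBD transmit buffer descriptors (contiguous
	 *		with the CBs, see the CBL comment above)
	 */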
	/* Set pointer to Intermediate System Configuration Pointer. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_SCP->scp_iscp_addr = IEE_SWAP32(IEE_PHYS_SHMEM(IEE_ISCP_OFF));
	SC_SCP->scp_sysbus = sc->sc_sysbus;
	/* Set pointer to System Control Block. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_ISCP->iscp_scb_addr = IEE_SWAP32(IEE_PHYS_SHMEM(IEE_SCB_OFF));
	/* Set pointer to Receive Frame Area. (physical address) */
	SC_SCB->scb_rfa_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_RFD_OFF));
	/* Set pointer to Command Block. (physical address) */
	SC_SCB->scb_cmd_blk_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_CB_OFF));

	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
	if (media != NULL) {
		for (n = 0 ; n < nmedia ; n++)
			ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, defmedia);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
	}

	ifp->if_softc = sc;
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = iee_start;		/* initiate output routine */
	ifp->if_ioctl = iee_ioctl;		/* ioctl routine */
	ifp->if_init = iee_init;		/* init routine */
	ifp->if_stop = iee_stop;		/* stop routine */
	ifp->if_watchdog = iee_watchdog;	/* timer routine */
	IFQ_SET_READY(&ifp->if_snd);
	/* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, eth_addr);

	aprint_normal(": Intel 82596%s address %s\n",
	    i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));

	for (n = 0 ; n < IEE_NCB ; n++)
		sc->sc_tx_map[n] = NULL;
	for (n = 0 ; n < IEE_NRFD ; n++) {
		sc->sc_rx_mbuf[n] = NULL;
		sc->sc_rx_map[n] = NULL;
	}
	sc->sc_tx_timeout = 0;
	sc->sc_setup_timeout = 0;
	(sc->sc_iee_reset)(sc);
}



void
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	ether_ifdetach(ifp);
	if_detach(ifp);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_shmem_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_shmem_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr, IEE_SHMEM_MAX);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs, sc->sc_dma_rsegs);
}



/* media change and status callback */
int
iee_mediachange(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediachange != NULL)
		return (sc->sc_mediachange)(ifp);
	return 0;
}



void
iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediastatus != NULL)
		(sc->sc_mediastatus)(ifp, ifmreq);
}



/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueuing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBDs than we support.
			 * Copy the packet into an mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    device_xname(sc->sc_dev));
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			SC_TBD(sc->sc_next_tbd + n)->tbd_tb_addr =
			    IEE_SWAPA32(sc->sc_tx_map[t]->dm_segs[n].ds_addr);
			SC_TBD(sc->sc_next_tbd + n)->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			SC_TBD(sc->sc_next_tbd + n)->tbd_link_addr =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_TBD_OFF + IEE_TBD_SZ
			    * (sc->sc_next_tbd + n + 1)));
		}
		SC_TBD(sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
#if NBPFILTER > 0
		/* Pass packet to bpf if someone listens. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
#endif
	}
	if (t == 0)
		/* No packets got set up for TX. */
		return;
	if (t == IEE_NCB)
		ifp->if_flags |= IFF_OACTIVE;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_SZ,
	    IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
}



/* ioctl routine */
int
iee_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iee_softc *sc = ifp->if_softc;
	int s;
	int err;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, (struct ifreq *)data,
		    &sc->sc_ifmedia, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		if (err == ENETRESET) {
			/*
			 * The multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING) {
				iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S |
				    IEE_CB_EL | IEE_CB_I);
				if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
					(*sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
			}
			err = 0;
		}
		break;
	}
	splx(s);
	return err;
}



/* init routine */
int
iee_init(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	int r;
	int t;
	int n;
	int err;

	sc->sc_next_cb = 0;
	sc->sc_next_tbd = 0;
	sc->sc_flags &= ~IEE_WANT_MCAST;
	sc->sc_rx_done = 0;
	SC_SCB->scb_crc_err = 0;
	SC_SCB->scb_align_err = 0;
	SC_SCB->scb_resource_err = 0;
	SC_SCB->scb_overrun_err = 0;
	SC_SCB->scb_rcvcdt_err = 0;
	SC_SCB->scb_short_fr_err = 0;
	sc->sc_crc_err = 0;
	sc->sc_align_err = 0;
	sc->sc_resource_err = 0;
	sc->sc_overrun_err = 0;
	sc->sc_rcvcdt_err = 0;
	sc->sc_short_fr_err = 0;
	sc->sc_tx_col = 0;
	sc->sc_rx_err = 0;
	sc->sc_cmd_err = 0;
	/* Create Transmit DMA maps. */
	for (t = 0 ; t < IEE_NCB ; t++) {
		if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_map[t]) != 0) {
			printf("%s: iee_init: can't create TX DMA map\n",
			    device_xname(sc->sc_dev));
			for (n = 0 ; n < t ; n++)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_tx_map[n]);
			return ENOBUFS;
		}
	}
	/* Initialize Receive Frame and Receive Buffer Descriptors */
	err = 0;
	memset(SC_RFD(0), 0, IEE_RFD_LIST_SZ);
	memset(SC_RBD(0), 0, IEE_RBD_LIST_SZ);
	for (r = 0 ; r < IEE_NRFD ; r++) {
		SC_RFD(r)->rfd_cmd = IEE_RFD_SF;
		SC_RFD(r)->rfd_link_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_RFD_OFF
		    + IEE_RFD_SZ * ((r + 1) % IEE_NRFD)));

		SC_RBD(r)->rbd_next_rbd =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_RBD_OFF
		    + IEE_RBD_SZ * ((r + 1) % IEE_NRFD)));
		if (sc->sc_rx_mbuf[r] == NULL) {
			MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
			if (sc->sc_rx_mbuf[r] == NULL) {
				printf("%s: iee_init: can't allocate mbuf\n",
				    device_xname(sc->sc_dev));
				err = 1;
				break;
			}
			MCLAIM(sc->sc_rx_mbuf[r],
			    &sc->sc_ethercom.ec_rx_mowner);
			MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
			if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
				printf("%s: iee_init: can't allocate mbuf"
				    " cluster\n", device_xname(sc->sc_dev));
				m_freem(sc->sc_rx_mbuf[r]);
				err = 1;
				break;
			}
		}
		if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_map[r]) != 0) {
			printf("%s: iee_init: can't create RX "
			    "DMA map\n", device_xname(sc->sc_dev));
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_map[r],
		    sc->sc_rx_mbuf[r]->m_ext.ext_buf,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
			printf("%s: iee_init: can't load RX DMA map\n",
			    device_xname(sc->sc_dev));
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, BUS_DMASYNC_PREREAD);
		SC_RBD(r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
		SC_RBD(r)->rbd_rb_addr =
		    IEE_SWAPA32(sc->sc_rx_map[r]->dm_segs[0].ds_addr);
	}
	SC_RFD(0)->rfd_rbd_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_RBD_OFF));
	if (err != 0) {
		for (n = 0 ; n < r; n++) {
			m_freem(sc->sc_rx_mbuf[n]);
			sc->sc_rx_mbuf[n] = NULL;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
			sc->sc_rx_map[n] = NULL;
		}
		for (n = 0 ; n < t ; n++) {
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
			sc->sc_tx_map[n] = NULL;
		}
		return ENOBUFS;
	}
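
	/*
	 * Chip initialization proper: reset the chip, then queue an IAS
	 * (Individual Address Setup) command and a CONF command carrying
	 * the 14 configuration bytes below. IEE_CF_11_LNGFLD is cleared
	 * here so long frames are not rejected; the assumption (not stated
	 * in the original) is that this matches the ETHERCAP_VLAN_MTU
	 * capability advertised in iee_attach().
	 */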
	(sc->sc_iee_reset)(sc);
	iee_cb_setup(sc, IEE_CB_CMD_IAS);
	sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
	sc->sc_cf[1] = IEE_CF_1_DEF;
	sc->sc_cf[2] = IEE_CF_2_DEF;
	sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
	    | IEE_CF_3_PREAMLEN_DEF;
	sc->sc_cf[4] = IEE_CF_4_DEF;
	sc->sc_cf[5] = IEE_CF_5_DEF;
	sc->sc_cf[6] = IEE_CF_6_DEF;
	sc->sc_cf[7] = IEE_CF_7_DEF;
	sc->sc_cf[8] = IEE_CF_8_DEF;
	sc->sc_cf[9] = IEE_CF_9_DEF;
	sc->sc_cf[10] = IEE_CF_10_DEF;
	sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
	sc->sc_cf[12] = IEE_CF_12_DEF;
	sc->sc_cf[13] = IEE_CF_13_DEF;
	iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
	SC_SCB->scb_rfa_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(IEE_RFD_OFF));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);

	/* Mark the interface as running and ready to RX/TX packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	return 0;
}



/* stop routine */
void
iee_stop(struct ifnet *ifp, int disable)
{
	struct iee_softc *sc = ifp->if_softc;
	int n;

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_flags |= IFF_OACTIVE;
	ifp->if_timer = 0;
	/* Reset the chip to get it quiet. */
	(sc->sc_iee_reset)(ifp->if_softc);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
	/* Release any dynamically allocated resources. */
	for (n = 0 ; n < IEE_NCB ; n++) {
		if (sc->sc_tx_map[n] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
		sc->sc_tx_map[n] = NULL;
	}
	for (n = 0 ; n < IEE_NRFD ; n++) {
		if (sc->sc_rx_mbuf[n] != NULL)
			m_freem(sc->sc_rx_mbuf[n]);
		sc->sc_rx_mbuf[n] = NULL;
		if (sc->sc_rx_map[n] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
		}
		sc->sc_rx_map[n] = NULL;
	}
}



/* timer routine */
void
iee_watchdog(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	(sc->sc_iee_reset)(sc);
	if (sc->sc_next_tbd != 0)
		printf("%s: iee_watchdog: transmit timeout %d\n",
		    device_xname(sc->sc_dev), ++sc->sc_tx_timeout);
	else
		printf("%s: iee_watchdog: setup timeout %d\n",
		    device_xname(sc->sc_dev), ++sc->sc_setup_timeout);
	iee_init(ifp);
}