Annotation of src/sys/dev/qbus/if_dmc.c, Revision 1.25
1.25 ! ozaki-r 1: /* $NetBSD: if_dmc.c,v 1.24 2016/04/20 09:01:04 knakahara Exp $ */
1.1 ragge 2: /*
3: * Copyright (c) 1982, 1986 Regents of the University of California.
4: * All rights reserved.
5: *
6: * Redistribution and use in source and binary forms, with or without
7: * modification, are permitted provided that the following conditions
8: * are met:
9: * 1. Redistributions of source code must retain the above copyright
10: * notice, this list of conditions and the following disclaimer.
11: * 2. Redistributions in binary form must reproduce the above copyright
12: * notice, this list of conditions and the following disclaimer in the
13: * documentation and/or other materials provided with the distribution.
1.7 agc 14: * 3. Neither the name of the University nor the names of its contributors
1.1 ragge 15: * may be used to endorse or promote products derived from this software
16: * without specific prior written permission.
17: *
18: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28: * SUCH DAMAGE.
29: *
30: * @(#)if_dmc.c 7.10 (Berkeley) 12/16/90
31: */
32:
33: /*
34: * DMC11 device driver, internet version
35: *
36: * Bill Nesheim
37: * Cornell University
38: *
39: * Lou Salkind
40: * New York University
41: */
1.2 lukem 42:
43: #include <sys/cdefs.h>
1.25 ! ozaki-r 44: __KERNEL_RCSID(0, "$NetBSD: if_dmc.c,v 1.24 2016/04/20 09:01:04 knakahara Exp $");
1.1 ragge 45:
46: #undef DMCDEBUG /* for base table dump on fatal error */
47:
48: #include "opt_inet.h"
49:
50: #include <sys/param.h>
51: #include <sys/systm.h>
52: #include <sys/mbuf.h>
53: #include <sys/ioctl.h>
54: #include <sys/socket.h>
55: #include <sys/syslog.h>
56: #include <sys/device.h>
57:
58: #include <net/if.h>
59:
60: #ifdef INET
61: #include <netinet/in.h>
62: #include <netinet/in_var.h>
63: #endif
64:
1.15 ad 65: #include <sys/bus.h>
1.1 ragge 66:
67: #include <dev/qbus/ubareg.h>
68: #include <dev/qbus/ubavar.h>
69: #include <dev/qbus/if_uba.h>
70:
71: #include <dev/qbus/if_dmcreg.h>
72:
73:
/*
 * output timeout value, sec.; should depend on line speed.
 */
static int dmc_timeout = 20;

#define NRCV 7				/* number of receive buffers */
#define NXMT 3				/* number of transmit buffers */
#define NCMDS (NRCV+NXMT+4)	/* size of command queue */

/*
 * Device register access helpers.
 * NB: these expand references to a local variable "sc"
 * (struct dmc_softc *), which must be in scope at the call site.
 */
#define DMC_WBYTE(csr, val) \
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, csr, val)
#define DMC_WWORD(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define DMC_RBYTE(csr) \
	bus_space_read_1(sc->sc_iot, sc->sc_ioh, csr)
#define DMC_RWORD(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)


#ifdef DMCDEBUG
#define printd if(dmcdebug)printf
int dmcdebug = 0;
#endif

/* error reporting intervals: log every Nth soft error of each kind */
#define DMC_RPNBFS 50	/* no receive buffers */
#define DMC_RPDSC 1	/* line disconnect */
#define DMC_RPTMO 10	/* timeout */
#define DMC_RPDCK 10	/* data check */
103:
/*
 * One queued DMC input command: the command byte written to BSEL0
 * plus the two data words written to SEL4/SEL6 by dmcrint().
 */
struct dmc_command {
	char qp_cmd;			/* command */
	short qp_ubaddr;		/* buffer address */
	short qp_cc;			/* character count || XMEM */
	struct dmc_command *qp_next;	/* next command on queue */
};
110:
/* per-buffer bookkeeping for the fixed receive/transmit buffer pools */
struct dmcbufs {
	int ubinfo;		/* from uballoc */
	short cc;		/* buffer size */
	short flags;		/* access control */
};
#define DBUF_OURS 0	/* buffer is available */
#define DBUF_DMCS 1	/* buffer claimed by somebody */
#define DBUF_XMIT 4	/* transmit buffer */
#define DBUF_RCV 8	/* receive buffer */
120:
121:
122: /*
123: * DMC software status per interface.
124: *
125: * Each interface is referenced by a network interface structure,
126: * sc_if, which the routing code uses to locate the interface.
127: * This structure contains the output queue for the interface, its address, ...
128: * We also have, for each interface, a set of 7 UBA interface structures
129: * for each, which
130: * contain information about the UNIBUS resources held by the interface:
131: * map registers, buffered data paths, etc. Information is cached in this
132: * structure for use by the if_uba.c routines in running the interface
133: * efficiently.
134: */
struct dmc_softc {
	device_t sc_dev;		/* Configuration common part */
	struct ifnet sc_if;		/* network-visible interface */
	short sc_oused;			/* output buffers currently in use */
	short sc_iused;			/* input buffers given to DMC */
	short sc_flag;			/* flags (DMC_RUNNING etc., below) */
	struct ubinfo sc_ui;		/* UBA mapping info for base table */
	int sc_errors[4];		/* non-fatal error counters */
	bus_space_tag_t sc_iot;		/* register space tag */
	bus_addr_t sc_ioh;		/* register space handle */
					/* NOTE(review): declared bus_addr_t but
					 * passed to bus_space_*() — works on
					 * this platform; confirm if ported. */
	bus_dma_tag_t sc_dmat;		/* DMA tag from the UBA */
	struct evcnt sc_rintrcnt;	/* Interrupt counting */
	struct evcnt sc_tintrcnt;	/* Interrupt counting */
	/* aliases for the soft-error counters bumped in dmcxint() */
#define sc_datck sc_errors[0]		/* data check errors */
#define sc_timeo sc_errors[1]		/* timeouts */
#define sc_nobuf sc_errors[2]		/* no-buffer conditions */
#define sc_disc sc_errors[3]		/* line disconnects */
	struct dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
	struct dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
	struct ifubinfo sc_ifuba;	/* UNIBUS resources */
	struct ifrw sc_ifr[NRCV];	/* UNIBUS receive buffer maps */
	struct ifxmt sc_ifw[NXMT];	/* UNIBUS transmit buffer maps */
	/* command queue stuff */
	struct dmc_command sc_cmdbuf[NCMDS];
	struct dmc_command *sc_qhead;	/* head of command queue */
	struct dmc_command *sc_qtail;	/* tail of command queue */
	struct dmc_command *sc_qactive;	/* command in progress */
	struct dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
	struct dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
	/* end command queue stuff */
	struct dmc_base {
		short d_base[128];	/* DMC base table */
	} dmc_base;
};
169:
1.20 cegger 170: static int dmcmatch(device_t, cfdata_t, void *);
171: static void dmcattach(device_t, device_t, void *);
1.1 ragge 172: static int dmcinit(struct ifnet *);
173: static void dmcrint(void *);
174: static void dmcxint(void *);
175: static void dmcdown(struct dmc_softc *sc);
176: static void dmcrestart(struct dmc_softc *);
177: static void dmcload(struct dmc_softc *, int, u_short, u_short);
178: static void dmcstart(struct ifnet *);
179: static void dmctimeout(struct ifnet *);
1.14 christos 180: static int dmcioctl(struct ifnet *, u_long, void *);
1.1 ragge 181: static int dmcoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
182: struct rtentry *);
1.20 cegger 183: static void dmcreset(device_t);
1.1 ragge 184:
1.21 chs 185: CFATTACH_DECL_NEW(dmc, sizeof(struct dmc_softc),
1.6 thorpej 186: dmcmatch, dmcattach, NULL, NULL);
1.1 ragge 187:
/* sc_flag bits */
#define DMC_RUNNING 0x01	/* device initialized */
#define DMC_BMAPPED 0x02	/* base table mapped */
#define DMC_RESTART 0x04	/* software restart in progress */
#define DMC_ONLINE 0x08		/* device running (had a RDYO) */
193:
194:
/*
 * Queue manipulation macros for the singly-linked command lists
 * (each list is a head/tail pointer pair of struct dmc_command).
 *
 * Wrapped in do { } while (0) so each expands to exactly one
 * statement and is safe inside an unbraced if/else; the previous
 * bare multi-statement form would silently misbehave there.
 */
#define QUEUE_AT_HEAD(qp, head, tail) \
do { \
	(qp)->qp_next = (head); \
	(head) = (qp); \
	if ((tail) == (struct dmc_command *) 0) \
		(tail) = (head); \
} while (0)

#define QUEUE_AT_TAIL(qp, head, tail) \
do { \
	if ((tail)) \
		(tail)->qp_next = (qp); \
	else \
		(head) = (qp); \
	(qp)->qp_next = (struct dmc_command *) 0; \
	(tail) = (qp); \
} while (0)

#define DEQUEUE(head, tail) \
do { \
	(head) = (head)->qp_next; \
	if ((head) == (struct dmc_command *) 0) \
		(tail) = (head); \
} while (0)
214:
/*
 * Probe: master-clear the device and wait for the microprocessor
 * RUN bit; if it never comes up there is no working DMC11 here.
 * Uses a throwaway softc on the stack so the DMC_* register
 * macros (which expect a local "sc") work before attach.
 */
int
dmcmatch(device_t parent, cfdata_t cf, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct dmc_softc ssc;		/* scratch softc for the macros */
	struct dmc_softc *sc = &ssc;
	int i;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;

	/* master clear, then busy-wait (bounded) for RUN */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcprobe: can't start device\n" );
		return (0);
	}
	/* request an input transfer with interrupts enabled */
	DMC_WBYTE(DMC_BSEL0, DMC_RQI|DMC_IEI);
	/* let's be paranoid */
	DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | DMC_RQI|DMC_IEI);
	DELAY(1000000);
	/* leave the device master-cleared for attach */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	return (1);
}
242:
243: /*
244: * Interface exists: make available by filling in network interface
245: * record. System will initialize the interface when it is ready
246: * to accept packets.
247: */
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
dmcattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct dmc_softc *sc = device_private(self);

	sc->sc_dev = self;
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/* fill in the ifnet; the DMC is a point-to-point serial link */
	strlcpy(sc->sc_if.if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	sc->sc_if.if_mtu = DMCMTU;
	sc->sc_if.if_init = dmcinit;
	sc->sc_if.if_output = dmcoutput;
	sc->sc_if.if_ioctl = dmcioctl;
	sc->sc_if.if_watchdog = dmctimeout;
	sc->sc_if.if_flags = IFF_POINTOPOINT;
	sc->sc_if.if_softc = sc;
	IFQ_SET_READY(&sc->sc_if.if_snd);

	/* two vectors: receive (cvec) and transmit-done (cvec+4) */
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, dmcrint, sc,
	    &sc->sc_rintrcnt);
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec+4, dmcxint, sc,
	    &sc->sc_tintrcnt);
	/* reinitialize after any UNIBUS reset */
	uba_reset_establish(dmcreset, sc->sc_dev);
	evcnt_attach_dynamic(&sc->sc_rintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");
	evcnt_attach_dynamic(&sc->sc_tintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");

	if_attach(&sc->sc_if);
}
281:
282: /*
283: * Reset of interface after UNIBUS reset.
284: * If interface is on specified UBA, reset its state.
285: */
286: void
1.20 cegger 287: dmcreset(device_t dev)
1.1 ragge 288: {
1.21 chs 289: struct dmc_softc *sc = device_private(dev);
1.1 ragge 290:
291: sc->sc_flag = 0;
292: sc->sc_if.if_flags &= ~IFF_RUNNING;
293: dmcinit(&sc->sc_if);
294: }
295:
296: /*
297: * Initialization of interface; reinitialize UNIBUS usage.
298: */
/*
 * Initialization of interface; reinitialize UNIBUS usage.
 * Always returns 0; on failure the interface is marked !IFF_UP
 * instead.  Order matters: base table in, line mode, interrupt
 * enable, then hand the receive buffers to the device.
 */
int
dmcinit(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	struct ifrw *ifrw;
	struct ifxmt *ifxp;
	struct dmcbufs *rp;
	struct dmc_command *qp;
	struct ifaddr *ifa;
	cfdata_t ui = device_cfdata(sc->sc_dev);
	int base;
	int s;

	/*
	 * Check to see that an address has been set
	 * (both local and destination for an address family).
	 */
	IFADDR_READER_FOREACH(ifa, ifp)
		if (ifa->ifa_addr->sa_family && ifa->ifa_dstaddr->sa_family)
			break;
	if (ifa == NULL)
		return 0;

	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcinit: DMC not running\n");
		ifp->if_flags &= ~IFF_UP;
		return 0;
	}
	/* map base table; done once, stays mapped across reinits */
	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
		sc->sc_ui.ui_size = sizeof(struct dmc_base);
		sc->sc_ui.ui_vaddr = (void *)&sc->dmc_base;
		uballoc(device_private(device_parent(sc->sc_dev)), &sc->sc_ui, 0);
		sc->sc_flag |= DMC_BMAPPED;
	}
	/* initialize UNIBUS resources */
	sc->sc_iused = sc->sc_oused = 0;
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (if_ubaminit(&sc->sc_ifuba,
		    device_private(device_parent(sc->sc_dev)),
		    sizeof(struct dmc_header) + DMCMTU,
		    sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
			aprint_error_dev(sc->sc_dev, "can't allocate uba resources\n");
			ifp->if_flags &= ~IFF_UP;
			return 0;
		}
		ifp->if_flags |= IFF_RUNNING;
	}
	sc->sc_flag &= ~DMC_ONLINE;
	sc->sc_flag |= DMC_RUNNING;
	/*
	 * Limit packets enqueued until we see if we're on the air
	 * (dmcxint opens the queue back up on the first RDYO).
	 */
	ifp->if_snd.ifq_maxlen = 3;

	/* initialize buffer pool */
	/* receives: full-size buffers, owned by us until queued below */
	ifrw = &sc->sc_ifr[0];
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->ubinfo = ifrw->ifrw_info;
		rp->cc = DMCMTU + sizeof (struct dmc_header);
		rp->flags = DBUF_OURS|DBUF_RCV;
		ifrw++;
	}
	/* transmits: empty until dmcstart() maps a packet in */
	ifxp = &sc->sc_ifw[0];
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		rp->ubinfo = ifxp->ifw_info;
		rp->cc = 0;
		rp->flags = DBUF_OURS|DBUF_XMIT;
		ifxp++;
	}

	/* set up command queues */
	sc->sc_qfreeh = sc->sc_qfreet
	 = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
		(struct dmc_command *)0;
	/* set up free command buffer list */
	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
	}

	/* base in: give the device the UNIBUS address of the base table */
	base = sc->sc_ui.ui_baddr;
	dmcload(sc, DMC_BASEI, (u_short)base, (base>>2) & DMC_XMEM);
	/* specify half duplex operation, flags tell if primary */
	/* or secondary station */
	if (ui->cf_flags == 0)
		/* use DDCMP mode in full duplex */
		dmcload(sc, DMC_CNTLI, 0, 0);
	else if (ui->cf_flags == 1)
		/* use MAINTENENCE mode */
		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
	else if (ui->cf_flags == 2)
		/* use DDCMP half duplex as primary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
	else if (ui->cf_flags == 3)
		/* use DDCMP half duplex as secondary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

	/* enable operation done interrupts (retry until the bit sticks) */
	while ((DMC_RBYTE(DMC_BSEL2) & DMC_IEO) == 0)
		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) | DMC_IEO);
	s = splnet();
	/* queue first NRCV buffers for DMC to fill */
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->flags |= DBUF_DMCS;
		dmcload(sc, DMC_READ, rp->ubinfo,
		    (((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
		sc->sc_iused++;
	}
	splx(s);
	return 0;
}
413:
414: /*
415: * Start output on interface. Get another datagram
416: * to send from the interface queue and map it to
417: * the interface before starting output.
418: *
419: * Must be called at spl 5
420: */
/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5
 */
void
dmcstart(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	struct mbuf *m;
	struct dmcbufs *rp;
	int n;

	/*
	 * Dequeue up to NXMT requests and map them to the UNIBUS.
	 * If no more requests, or no dmc buffers available, just return.
	 * "n" tracks rp's index so the matching sc_ifw[] map is used.
	 */
	n = 0;
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
		/* find an available buffer */
		if ((rp->flags & DBUF_DMCS) == 0) {
			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
			if (m == 0)
				return;
			/* mark it dmcs */
			rp->flags |= (DBUF_DMCS);
			/*
			 * Have request mapped to UNIBUS for transmission
			 * and start the output.
			 */
			rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
			rp->cc &= DMC_CCOUNT;
			/* arm the watchdog on the first outstanding xmit */
			if (++sc->sc_oused == 1)
				sc->sc_if.if_timer = dmc_timeout;
			dmcload(sc, DMC_WRITE, rp->ubinfo,
			    rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
		}
		n++;
	}
}
456:
457: /*
458: * Utility routine to load the DMC device registers.
459: */
/*
 * Utility routine to load the DMC device registers.
 * Queues a command (type + two data words); if the command port is
 * idle the command is started immediately via dmcrint().  READs are
 * queued at the head so receive buffers are replenished before any
 * pending writes.  Panics if all NCMDS command buffers are in use.
 */
void
dmcload(struct dmc_softc *sc, int type, u_short w0, u_short w1)
{
	struct dmc_command *qp;
	int sps;

	sps = splnet();

	/* grab a command buffer from the free list */
	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
		panic("dmc command queue overflow");
	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

	/* fill in requested info */
	qp->qp_cmd = (type | DMC_RQI);
	qp->qp_ubaddr = w0;
	qp->qp_cc = w1;

	if (sc->sc_qactive) {	/* command in progress */
		if (type == DMC_READ) {
			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
		} else {
			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
		}
	} else {	/* command port free */
		sc->sc_qactive = qp;
		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
		dmcrint(sc);
	}
	splx(sps);
}
491:
492: /*
493: * DMC interface receiver interrupt.
494: * Ready to accept another command,
495: * pull one off the command queue.
496: */
/*
 * DMC interface receiver interrupt.
 * Ready to accept another command,
 * pull one off the command queue.
 */
void
dmcrint(void *arg)
{
	struct dmc_softc *sc = arg;
	struct dmc_command *qp;
	int n;

	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
		printf("%s: dmcrint no command\n", device_xname(sc->sc_dev));
		return;
	}
	/* while the device asserts RDYI, feed it the active command's words */
	while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
		DMC_WWORD(DMC_SEL4, qp->qp_ubaddr);
		DMC_WWORD(DMC_SEL6, qp->qp_cc);
		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & ~(DMC_IEI|DMC_RQI));
		/* free command buffer */
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
		/* wait for the device to drop RDYI (accept the words) */
		while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
			/*
			 * Can't check for RDYO here 'cause
			 * this routine isn't reentrant!
			 */
			DELAY(5);
		}
		/* move on to next command */
		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
			break;		/* all done */
		/* more commands to do, start the next one */
		qp = sc->sc_qactive;
		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
		/* give the device a bounded spin to raise RDYI or RDYO */
		n = RDYSCAN;
		while (n-- > 0)
			if ((DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) ||
			    (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO))
				break;
	}
	if (sc->sc_qactive) {
		/*
		 * NOTE(review): the "&" here clears every bsel0 bit except
		 * IEI/RQI rather than setting them; historical DMC drivers
		 * used "|= DMC_IEI|DMC_RQI" at this point -- confirm
		 * against the DMC11 register description before changing.
		 */
		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
		/* VMS does it twice !*$%@# */
		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
	}

}
541:
542: /*
543: * DMC interface transmitter interrupt.
544: * A transfer may have completed, check for errors.
545: * If it was a read, notify appropriate protocol.
546: * If it was a write, pull the next one off the queue.
547: */
/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
void
dmcxint(void *a)
{
	struct dmc_softc *sc = a;

	struct ifnet *ifp;
	struct mbuf *m;
	int arg, pkaddr, cmd, len, s;
	struct ifrw *ifrw;
	struct dmcbufs *rp;
	struct ifxmt *ifxp;
	struct dmc_header *dh;
	char buf[64];

	ifp = &sc->sc_if;

	/* drain all pending completions while RDYO is asserted */
	while (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO) {

		cmd = DMC_RBYTE(DMC_BSEL2) & 0xff;
		arg = DMC_RWORD(DMC_SEL6) & 0xffff;
		/* reconstruct UNIBUS address of buffer returned to us */
		pkaddr = ((arg&DMC_XMEM)<<2) | (DMC_RWORD(DMC_SEL4) & 0xffff);
		/* release port */
		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) & ~DMC_RDYO);
		switch (cmd & 07) {

		case DMC_OUR:
			/*
			 * A read has completed.
			 * Pass packet to type specific
			 * higher-level input routine.
			 */
			ifp->if_ipackets++;
			/* find location in dmcuba struct */
			ifrw= &sc->sc_ifr[0];
			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifrw++;
			}
			if (rp >= &sc->sc_rbufs[NRCV])
				panic("dmc rcv");
			if ((rp->flags & DBUF_DMCS) == 0)
				aprint_error_dev(sc->sc_dev, "done unalloc rbuf\n");

			/* payload length excludes our link-level header */
			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
			if (len < 0 || len > DMCMTU) {
				ifp->if_ierrors++;
#ifdef DMCDEBUG
				printd("%s: bad rcv pkt addr 0x%x len 0x%x\n",
				    device_xname(sc->sc_dev), pkaddr, len);
#endif
				goto setup;
			}
			/*
			 * Deal with trailer protocol: if type is trailer
			 * get true type from first 16-bit word past data.
			 * Remember that type was trailer by setting off.
			 */
			dh = (struct dmc_header *)ifrw->ifrw_addr;
			dh->dmc_type = ntohs((u_short)dh->dmc_type);
			if (len == 0)
				goto setup;

			/*
			 * Pull packet off interface.  Off is nonzero if
			 * packet has trailing header; dmc_get will then
			 * force this header information to be at the front,
			 * but we still have to drop the type and length
			 * which are at the front of any trailer data.
			 */
			m = if_ubaget(&sc->sc_ifuba, ifrw, ifp, len);
			if (m == 0)
				goto setup;
			/* Shave off dmc_header */
			m_adj(m, sizeof(struct dmc_header));
			/* only IP is dispatched; anything else is dropped */
			switch (dh->dmc_type) {
#ifdef INET
			case DMC_IPTYPE:
				break;
#endif
			default:
				m_freem(m);
				goto setup;
			}

			s = splnet();
			if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
				m_freem(m);
			}
			splx(s);

setup:
			/* is this needed? */
			rp->ubinfo = ifrw->ifrw_info;

			/* hand the buffer straight back to the device */
			dmcload(sc, DMC_READ, rp->ubinfo,
			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
			break;

		case DMC_OUX:
			/*
			 * A write has completed, start another
			 * transfer if there is more data to send.
			 */
			ifp->if_opackets++;
			/* find associated dmcbuf structure */
			ifxp = &sc->sc_ifw[0];
			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifxp++;
			}
			if (rp >= &sc->sc_xbufs[NXMT]) {
				aprint_error_dev(sc->sc_dev, "bad packet address 0x%x\n",
				    pkaddr);
				break;
			}
			if ((rp->flags & DBUF_DMCS) == 0)
				aprint_error_dev(sc->sc_dev, "unallocated packet 0x%x\n",
				    pkaddr);
			/* mark buffer free */
			if_ubaend(&sc->sc_ifuba, ifxp);
			rp->flags &= ~DBUF_DMCS;
			/* disarm watchdog when nothing is outstanding */
			if (--sc->sc_oused == 0)
				sc->sc_if.if_timer = 0;
			else
				sc->sc_if.if_timer = dmc_timeout;
			if ((sc->sc_flag & DMC_ONLINE) == 0) {
				extern int ifqmaxlen;

				/*
				 * We're on the air.
				 * Open the queue to the usual value.
				 */
				sc->sc_flag |= DMC_ONLINE;
				ifp->if_snd.ifq_maxlen = ifqmaxlen;
			}
			break;

		case DMC_CNTLO:
			/* control-out: fatal errors restart, others counted */
			arg &= DMC_CNTMASK;
			if (arg & DMC_FATAL) {
				if (arg != DMC_START) {
					snprintb(buf, sizeof(buf), CNTLO_BITS,
					    arg);
					log(LOG_ERR,
					    "%s: fatal error, flags=%s\n",
					    device_xname(sc->sc_dev), buf);
				}
				dmcrestart(sc);
				break;
			}
			/* ACCUMULATE STATISTICS */
			switch(arg) {
			case DMC_NOBUFS:
				ifp->if_ierrors++;
				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
					goto report;
				break;
			case DMC_DISCONN:
				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
					goto report;
				break;
			case DMC_TIMEOUT:
				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
					goto report;
				break;
			case DMC_DATACK:
				ifp->if_oerrors++;
				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
					goto report;
				break;
			default:
				goto report;
			}
			break;
		report:
#ifdef DMCDEBUG
			snprintb(buf, sizeof(buf), CNTLO_BITS, arg);
			printd("%s: soft error, flags=%s\n",
			    device_xname(sc->sc_dev), buf);
#endif
			if ((sc->sc_flag & DMC_RESTART) == 0) {
				/*
				 * kill off the dmc to get things
				 * going again by generating a
				 * procedure error
				 */
				sc->sc_flag |= DMC_RESTART;
				arg = sc->sc_ui.ui_baddr;
				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
			}
			break;

		default:
			printf("%s: bad control %o\n",
			    device_xname(sc->sc_dev), cmd);
			break;
		}
	}
	/* a transmit slot may have freed up; push more output */
	dmcstart(ifp);
}
751:
752: /*
753: * DMC output routine.
754: * Encapsulate a packet of type family for the dmc.
755: * Use trailer local net encapsulation if enough data in first
756: * packet leaves a multiple of 512 bytes of data in remainder.
757: */
/*
 * DMC output routine.
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 */
int
dmcoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
    struct rtentry *rt)
{
	int type, error, s;
	struct mbuf *m = m0;
	struct dmc_header *dh;

	if ((ifp->if_flags & IFF_UP) == 0) {
		error = ENETDOWN;
		goto bad;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		type = DMC_IPTYPE;
		break;
#endif

	case AF_UNSPEC:
		/* caller supplies a prebuilt dmc_header in sa_data */
		dh = (struct dmc_header *)dst->sa_data;
		type = dh->dmc_type;
		break;

	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Add local network header
	 * (there is space for a uba on a vax to step on)
	 */
	M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
	if (m == 0) {
		/*
		 * NOTE(review): M_PREPEND frees the chain on failure, so
		 * the m_freem(m0) at "bad" below may be a double free of
		 * the original chain -- verify against mbuf(9).
		 */
		error = ENOBUFS;
		goto bad;
	}
	dh = mtod(m, struct dmc_header *);
	dh->dmc_type = htons((u_short)type);

	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	s = splnet();
	IFQ_ENQUEUE(&ifp->if_snd, m, error);
	if (error) {
		/* mbuf is already freed */
		splx(s);
		return (error);
	}
	dmcstart(ifp);
	splx(s);
	return (0);

bad:
	m_freem(m0);
	return (error);
}
823:
824:
825: /*
826: * Process an ioctl request.
827: */
828: /* ARGSUSED */
/*
 * Process an ioctl request.
 */
/* ARGSUSED */
int
dmcioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s = splnet(), error = 0;
	register struct dmc_softc *sc = ifp->if_softc;

	switch (cmd) {

	case SIOCINITIFADDR:
		/* address assigned: mark up and start if not yet running */
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			dmcinit(ifp);
		break;

	case SIOCSIFDSTADDR:
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			dmcinit(ifp);
		break;

	case SIOCSIFFLAGS:
		/* sync device run state with the administrative UP flag */
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    sc->sc_flag & DMC_RUNNING)
			dmcdown(sc);
		else if (ifp->if_flags & IFF_UP &&
		    (sc->sc_flag & DMC_RUNNING) == 0)
			dmcrestart(sc);
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
	}
	splx(s);
	return (error);
}
865:
866: /*
867: * Restart after a fatal error.
868: * Clear device and reinitialize.
869: */
870: void
871: dmcrestart(struct dmc_softc *sc)
872: {
873: int s, i;
1.9 simonb 874:
1.1 ragge 875: #ifdef DMCDEBUG
876: /* dump base table */
1.21 chs 877: printf("%s base table:\n", device_xname(sc->sc_dev));
1.1 ragge 878: for (i = 0; i < sizeof (struct dmc_base); i++)
879: printf("%o\n" ,dmc_base[unit].d_base[i]);
880: #endif
881:
882: dmcdown(sc);
883:
884: /*
885: * Let the DMR finish the MCLR. At 1 Mbit, it should do so
886: * in about a max of 6.4 milliseconds with diagnostics enabled.
887: */
888: for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
889: ;
890: /* Did the timer expire or did the DMR finish? */
891: if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
1.21 chs 892: log(LOG_ERR, "%s: M820 Test Failed\n", device_xname(sc->sc_dev));
1.1 ragge 893: return;
894: }
895:
896: /* restart DMC */
897: dmcinit(&sc->sc_if);
898: sc->sc_flag &= ~DMC_RESTART;
899: s = splnet();
900: dmcstart(&sc->sc_if);
901: splx(s);
902: sc->sc_if.if_collisions++; /* why not? */
903: }
904:
905: /*
906: * Reset a device and mark down.
907: * Flush output queue and drop queue limit.
908: */
/*
 * Reset a device and mark down.
 * Flush output queue and drop queue limit.
 */
void
dmcdown(struct dmc_softc *sc)
{
	struct ifxmt *ifxp;

	/* master clear stops the device immediately */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);

	/* NB: loop body is compiled out ("notyet"); kept for the
	 * pending per-transmit mbuf cleanup it is meant to do. */
	for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
#ifdef notyet
		if (ifxp->ifw_xtofree) {
			(void) m_freem(ifxp->ifw_xtofree);
			ifxp->ifw_xtofree = 0;
		}
#endif
	}
	IF_PURGE(&sc->sc_if.if_snd);
}
927:
928: /*
929: * Watchdog timeout to see that transmitted packets don't
930: * lose interrupts. The device has to be online (the first
931: * transmission may block until the other side comes up).
932: */
/*
 * Watchdog timeout to see that transmitted packets don't
 * lose interrupts.  The device has to be online (the first
 * transmission may block until the other side comes up).
 */
void
dmctimeout(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	char buf1[64], buf2[64];

	if (sc->sc_flag & DMC_ONLINE) {
		/* log the current csr state, then kick the device over */
		snprintb(buf1, sizeof(buf1), DMC0BITS,
		    DMC_RBYTE(DMC_BSEL0) & 0xff);
		snprintb(buf2, sizeof(buf2), DMC2BITS,
		    DMC_RBYTE(DMC_BSEL2) & 0xff);
		log(LOG_ERR, "%s: output timeout, bsel0=%s bsel2=%s\n",
		    device_xname(sc->sc_dev), buf1, buf2);
		dmcrestart(sc);
	}
}
CVSweb <webmaster@jp.NetBSD.org>