Annotation of src/sys/dev/qbus/if_dmc.c, Revision 1.21.2.1
1.21.2.1! rmind 1: /* $NetBSD: if_dmc.c,v 1.21 2012/10/27 17:18:37 chs Exp $ */
1.1 ragge 2: /*
3: * Copyright (c) 1982, 1986 Regents of the University of California.
4: * All rights reserved.
5: *
6: * Redistribution and use in source and binary forms, with or without
7: * modification, are permitted provided that the following conditions
8: * are met:
9: * 1. Redistributions of source code must retain the above copyright
10: * notice, this list of conditions and the following disclaimer.
11: * 2. Redistributions in binary form must reproduce the above copyright
12: * notice, this list of conditions and the following disclaimer in the
13: * documentation and/or other materials provided with the distribution.
1.7 agc 14: * 3. Neither the name of the University nor the names of its contributors
1.1 ragge 15: * may be used to endorse or promote products derived from this software
16: * without specific prior written permission.
17: *
18: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28: * SUCH DAMAGE.
29: *
30: * @(#)if_dmc.c 7.10 (Berkeley) 12/16/90
31: */
32:
33: /*
34: * DMC11 device driver, internet version
35: *
36: * Bill Nesheim
37: * Cornell University
38: *
39: * Lou Salkind
40: * New York University
41: */
1.2 lukem 42:
43: #include <sys/cdefs.h>
1.21.2.1! rmind 44: __KERNEL_RCSID(0, "$NetBSD: if_dmc.c,v 1.21 2012/10/27 17:18:37 chs Exp $");
1.1 ragge 45:
46: #undef DMCDEBUG /* for base table dump on fatal error */
47:
48: #include "opt_inet.h"
49:
50: #include <sys/param.h>
51: #include <sys/systm.h>
52: #include <sys/mbuf.h>
53: #include <sys/ioctl.h>
54: #include <sys/socket.h>
55: #include <sys/syslog.h>
56: #include <sys/device.h>
57:
58: #include <net/if.h>
59: #include <net/netisr.h>
60:
61: #ifdef INET
62: #include <netinet/in.h>
63: #include <netinet/in_var.h>
64: #endif
65:
1.15 ad 66: #include <sys/bus.h>
1.1 ragge 67:
68: #include <dev/qbus/ubareg.h>
69: #include <dev/qbus/ubavar.h>
70: #include <dev/qbus/if_uba.h>
71:
72: #include <dev/qbus/if_dmcreg.h>
73:
74:
75: /*
76: * output timeout value, sec.; should depend on line speed.
77: */
78: static int dmc_timeout = 20;
79:
80: #define NRCV 7
1.9 simonb 81: #define NXMT 3
1.1 ragge 82: #define NCMDS (NRCV+NXMT+4) /* size of command queue */
83:
84: #define DMC_WBYTE(csr, val) \
85: bus_space_write_1(sc->sc_iot, sc->sc_ioh, csr, val)
86: #define DMC_WWORD(csr, val) \
87: bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
88: #define DMC_RBYTE(csr) \
89: bus_space_read_1(sc->sc_iot, sc->sc_ioh, csr)
90: #define DMC_RWORD(csr) \
91: bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
92:
93:
94: #ifdef DMCDEBUG
95: #define printd if(dmcdebug)printf
96: int dmcdebug = 0;
97: #endif
98:
99: /* error reporting intervals */
100: #define DMC_RPNBFS 50
101: #define DMC_RPDSC 1
102: #define DMC_RPTMO 10
103: #define DMC_RPDCK 10
104:
/*
 * One entry of the software command queue; the fields mirror what is
 * written to the device ports (SEL4/SEL6/BSEL0) when the command is issued.
 */
struct dmc_command {
	char	qp_cmd;		/* command */
	short	qp_ubaddr;	/* buffer address */
	short	qp_cc;		/* character count || XMEM */
	struct	dmc_command *qp_next;	/* next command on queue */
};

/* Per-buffer bookkeeping for the fixed pools of receive/transmit buffers. */
struct dmcbufs {
	int	ubinfo;		/* from uballoc */
	short	cc;		/* buffer size */
	short	flags;		/* access control */
};
#define	DBUF_OURS	0	/* buffer is available */
#define	DBUF_DMCS	1	/* buffer claimed by somebody */
#define	DBUF_XMIT	4	/* transmit buffer */
#define	DBUF_RCV	8	/* receive buffer */
121:
122:
123: /*
124: * DMC software status per interface.
125: *
126: * Each interface is referenced by a network interface structure,
127: * sc_if, which the routing code uses to locate the interface.
128: * This structure contains the output queue for the interface, its address, ...
129: * We also have, for each interface, a set of 7 UBA interface structures
130: * for each, which
131: * contain information about the UNIBUS resources held by the interface:
132: * map registers, buffered data paths, etc. Information is cached in this
133: * structure for use by the if_uba.c routines in running the interface
134: * efficiently.
135: */
136: struct dmc_softc {
1.21 chs 137: device_t sc_dev; /* Configuration common part */
1.1 ragge 138: struct ifnet sc_if; /* network-visible interface */
139: short sc_oused; /* output buffers currently in use */
140: short sc_iused; /* input buffers given to DMC */
141: short sc_flag; /* flags */
142: struct ubinfo sc_ui; /* UBA mapping info for base table */
143: int sc_errors[4]; /* non-fatal error counters */
144: bus_space_tag_t sc_iot;
145: bus_addr_t sc_ioh;
146: bus_dma_tag_t sc_dmat;
147: struct evcnt sc_rintrcnt; /* Interrupt counting */
148: struct evcnt sc_tintrcnt; /* Interrupt counting */
149: #define sc_datck sc_errors[0]
150: #define sc_timeo sc_errors[1]
151: #define sc_nobuf sc_errors[2]
152: #define sc_disc sc_errors[3]
153: struct dmcbufs sc_rbufs[NRCV]; /* receive buffer info */
154: struct dmcbufs sc_xbufs[NXMT]; /* transmit buffer info */
155: struct ifubinfo sc_ifuba; /* UNIBUS resources */
156: struct ifrw sc_ifr[NRCV]; /* UNIBUS receive buffer maps */
157: struct ifxmt sc_ifw[NXMT]; /* UNIBUS receive buffer maps */
158: /* command queue stuff */
159: struct dmc_command sc_cmdbuf[NCMDS];
160: struct dmc_command *sc_qhead; /* head of command queue */
161: struct dmc_command *sc_qtail; /* tail of command queue */
162: struct dmc_command *sc_qactive; /* command in progress */
163: struct dmc_command *sc_qfreeh; /* head of list of free cmd buffers */
164: struct dmc_command *sc_qfreet; /* tail of list of free cmd buffers */
165: /* end command queue stuff */
166: struct dmc_base {
167: short d_base[128]; /* DMC base table */
168: } dmc_base;
169: };
170:
1.20 cegger 171: static int dmcmatch(device_t, cfdata_t, void *);
172: static void dmcattach(device_t, device_t, void *);
1.1 ragge 173: static int dmcinit(struct ifnet *);
174: static void dmcrint(void *);
175: static void dmcxint(void *);
176: static void dmcdown(struct dmc_softc *sc);
177: static void dmcrestart(struct dmc_softc *);
178: static void dmcload(struct dmc_softc *, int, u_short, u_short);
179: static void dmcstart(struct ifnet *);
180: static void dmctimeout(struct ifnet *);
1.14 christos 181: static int dmcioctl(struct ifnet *, u_long, void *);
1.1 ragge 182: static int dmcoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
183: struct rtentry *);
1.20 cegger 184: static void dmcreset(device_t);
1.1 ragge 185:
1.21 chs 186: CFATTACH_DECL_NEW(dmc, sizeof(struct dmc_softc),
1.6 thorpej 187: dmcmatch, dmcattach, NULL, NULL);
1.1 ragge 188:
189: /* flags */
190: #define DMC_RUNNING 0x01 /* device initialized */
191: #define DMC_BMAPPED 0x02 /* base table mapped */
192: #define DMC_RESTART 0x04 /* software restart in progress */
193: #define DMC_ONLINE 0x08 /* device running (had a RDYO) */
194:
195:
/*
 * Queue manipulation macros for the singly-linked dmc_command lists.
 * Each list is described by a (head, tail) pointer pair; an empty list
 * has both NULL.
 *
 * Wrapped in do { } while (0) so each macro behaves as a single statement
 * even inside an unbraced if/else (the original multi-statement expansions
 * would silently break there).
 */
#define QUEUE_AT_HEAD(qp, head, tail) \
	do { \
		(qp)->qp_next = (head); \
		(head) = (qp); \
		if ((tail) == (struct dmc_command *)0) \
			(tail) = (head); \
	} while (0)

#define QUEUE_AT_TAIL(qp, head, tail) \
	do { \
		if ((tail)) \
			(tail)->qp_next = (qp); \
		else \
			(head) = (qp); \
		(qp)->qp_next = (struct dmc_command *)0; \
		(tail) = (qp); \
	} while (0)

/* Pop the head entry; caller must have saved the head pointer first. */
#define DEQUEUE(head, tail) \
	do { \
		(head) = (head)->qp_next; \
		if ((head) == (struct dmc_command *)0) \
			(tail) = (head); \
	} while (0)
215:
/*
 * Probe for a DMC11: master-clear the device and wait (bounded spin) for
 * the microprocessor RUN bit to come up.  A softc is built on the stack
 * only so the DMC_* register macros, which expect a local "sc", work
 * before attach.
 */
int
dmcmatch(device_t parent, cfdata_t cf, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct dmc_softc ssc;
	struct dmc_softc *sc = &ssc;
	int i;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;

	/* master clear, then spin until the DMC sets RUN (or give up) */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcprobe: can't start device\n" );
		return (0);
	}
	/* request input with interrupts enabled so the vector is latched */
	DMC_WBYTE(DMC_BSEL0, DMC_RQI|DMC_IEI);
	/* let's be paranoid */
	DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | DMC_RQI|DMC_IEI);
	DELAY(1000000);
	/* master clear again so the device is left quiescent for attach */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	return (1);
}
243:
244: /*
245: * Interface exists: make available by filling in network interface
246: * record. System will initialize the interface when it is ready
247: * to accept packets.
248: */
249: void
1.20 cegger 250: dmcattach(device_t parent, device_t self, void *aux)
1.1 ragge 251: {
252: struct uba_attach_args *ua = aux;
1.13 thorpej 253: struct dmc_softc *sc = device_private(self);
1.1 ragge 254:
1.21 chs 255: sc->sc_dev = self;
1.1 ragge 256: sc->sc_iot = ua->ua_iot;
257: sc->sc_ioh = ua->ua_ioh;
258: sc->sc_dmat = ua->ua_dmat;
259:
1.21 chs 260: strlcpy(sc->sc_if.if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
1.1 ragge 261: sc->sc_if.if_mtu = DMCMTU;
262: sc->sc_if.if_init = dmcinit;
263: sc->sc_if.if_output = dmcoutput;
264: sc->sc_if.if_ioctl = dmcioctl;
265: sc->sc_if.if_watchdog = dmctimeout;
266: sc->sc_if.if_flags = IFF_POINTOPOINT;
267: sc->sc_if.if_softc = sc;
1.3 itojun 268: IFQ_SET_READY(&sc->sc_if.if_snd);
1.1 ragge 269:
270: uba_intr_establish(ua->ua_icookie, ua->ua_cvec, dmcrint, sc,
271: &sc->sc_rintrcnt);
272: uba_intr_establish(ua->ua_icookie, ua->ua_cvec+4, dmcxint, sc,
273: &sc->sc_tintrcnt);
1.21 chs 274: uba_reset_establish(dmcreset, sc->sc_dev);
1.1 ragge 275: evcnt_attach_dynamic(&sc->sc_rintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
1.21 chs 276: device_xname(sc->sc_dev), "intr");
1.1 ragge 277: evcnt_attach_dynamic(&sc->sc_tintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
1.21 chs 278: device_xname(sc->sc_dev), "intr");
1.1 ragge 279:
280: if_attach(&sc->sc_if);
281: }
282:
283: /*
284: * Reset of interface after UNIBUS reset.
285: * If interface is on specified UBA, reset its state.
286: */
287: void
1.20 cegger 288: dmcreset(device_t dev)
1.1 ragge 289: {
1.21 chs 290: struct dmc_softc *sc = device_private(dev);
1.1 ragge 291:
292: sc->sc_flag = 0;
293: sc->sc_if.if_flags &= ~IFF_RUNNING;
294: dmcinit(&sc->sc_if);
295: }
296:
297: /*
298: * Initialization of interface; reinitialize UNIBUS usage.
299: */
300: int
301: dmcinit(struct ifnet *ifp)
302: {
303: struct dmc_softc *sc = ifp->if_softc;
304: struct ifrw *ifrw;
305: struct ifxmt *ifxp;
306: struct dmcbufs *rp;
307: struct dmc_command *qp;
308: struct ifaddr *ifa;
1.21 chs 309: cfdata_t ui = device_cfdata(sc->sc_dev);
1.1 ragge 310: int base;
311: int s;
312:
313: /*
314: * Check to see that an address has been set
315: * (both local and destination for an address family).
316: */
1.8 matt 317: IFADDR_FOREACH(ifa, ifp)
1.1 ragge 318: if (ifa->ifa_addr->sa_family && ifa->ifa_dstaddr->sa_family)
319: break;
320: if (ifa == (struct ifaddr *) 0)
321: return 0;
322:
323: if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
324: printf("dmcinit: DMC not running\n");
325: ifp->if_flags &= ~IFF_UP;
326: return 0;
327: }
328: /* map base table */
329: if ((sc->sc_flag & DMC_BMAPPED) == 0) {
330: sc->sc_ui.ui_size = sizeof(struct dmc_base);
1.14 christos 331: sc->sc_ui.ui_vaddr = (void *)&sc->dmc_base;
1.21 chs 332: uballoc(device_private(device_parent(sc->sc_dev)), &sc->sc_ui, 0);
1.1 ragge 333: sc->sc_flag |= DMC_BMAPPED;
334: }
335: /* initialize UNIBUS resources */
336: sc->sc_iused = sc->sc_oused = 0;
337: if ((ifp->if_flags & IFF_RUNNING) == 0) {
1.11 thorpej 338: if (if_ubaminit(&sc->sc_ifuba,
1.21 chs 339: device_private(device_parent(sc->sc_dev)),
1.1 ragge 340: sizeof(struct dmc_header) + DMCMTU,
341: sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
1.21 chs 342: aprint_error_dev(sc->sc_dev, "can't allocate uba resources\n");
1.1 ragge 343: ifp->if_flags &= ~IFF_UP;
344: return 0;
345: }
346: ifp->if_flags |= IFF_RUNNING;
347: }
348: sc->sc_flag &= ~DMC_ONLINE;
349: sc->sc_flag |= DMC_RUNNING;
350: /*
351: * Limit packets enqueued until we see if we're on the air.
352: */
353: ifp->if_snd.ifq_maxlen = 3;
354:
355: /* initialize buffer pool */
356: /* receives */
357: ifrw = &sc->sc_ifr[0];
358: for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
359: rp->ubinfo = ifrw->ifrw_info;
360: rp->cc = DMCMTU + sizeof (struct dmc_header);
361: rp->flags = DBUF_OURS|DBUF_RCV;
1.9 simonb 362: ifrw++;
1.1 ragge 363: }
364: /* transmits */
365: ifxp = &sc->sc_ifw[0];
366: for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
367: rp->ubinfo = ifxp->ifw_info;
368: rp->cc = 0;
369: rp->flags = DBUF_OURS|DBUF_XMIT;
1.9 simonb 370: ifxp++;
1.1 ragge 371: }
372:
373: /* set up command queues */
374: sc->sc_qfreeh = sc->sc_qfreet
375: = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
376: (struct dmc_command *)0;
377: /* set up free command buffer list */
378: for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
379: QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
380: }
381:
382: /* base in */
383: base = sc->sc_ui.ui_baddr;
384: dmcload(sc, DMC_BASEI, (u_short)base, (base>>2) & DMC_XMEM);
385: /* specify half duplex operation, flags tell if primary */
386: /* or secondary station */
387: if (ui->cf_flags == 0)
388: /* use DDCMP mode in full duplex */
389: dmcload(sc, DMC_CNTLI, 0, 0);
390: else if (ui->cf_flags == 1)
391: /* use MAINTENENCE mode */
392: dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
393: else if (ui->cf_flags == 2)
394: /* use DDCMP half duplex as primary station */
395: dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
396: else if (ui->cf_flags == 3)
397: /* use DDCMP half duplex as secondary station */
398: dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);
399:
400: /* enable operation done interrupts */
401: while ((DMC_RBYTE(DMC_BSEL2) & DMC_IEO) == 0)
402: DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) | DMC_IEO);
403: s = splnet();
404: /* queue first NRCV buffers for DMC to fill */
405: for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
406: rp->flags |= DBUF_DMCS;
407: dmcload(sc, DMC_READ, rp->ubinfo,
408: (((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
409: sc->sc_iused++;
410: }
411: splx(s);
412: return 0;
413: }
414:
415: /*
416: * Start output on interface. Get another datagram
417: * to send from the interface queue and map it to
418: * the interface before starting output.
419: *
420: * Must be called at spl 5
421: */
422: void
423: dmcstart(struct ifnet *ifp)
424: {
425: struct dmc_softc *sc = ifp->if_softc;
426: struct mbuf *m;
427: struct dmcbufs *rp;
428: int n;
429:
430: /*
431: * Dequeue up to NXMT requests and map them to the UNIBUS.
432: * If no more requests, or no dmc buffers available, just return.
433: */
434: n = 0;
435: for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
436: /* find an available buffer */
437: if ((rp->flags & DBUF_DMCS) == 0) {
1.3 itojun 438: IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
1.1 ragge 439: if (m == 0)
440: return;
441: /* mark it dmcs */
442: rp->flags |= (DBUF_DMCS);
443: /*
444: * Have request mapped to UNIBUS for transmission
445: * and start the output.
446: */
447: rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
448: rp->cc &= DMC_CCOUNT;
449: if (++sc->sc_oused == 1)
450: sc->sc_if.if_timer = dmc_timeout;
1.9 simonb 451: dmcload(sc, DMC_WRITE, rp->ubinfo,
1.1 ragge 452: rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
453: }
454: n++;
455: }
456: }
457:
458: /*
459: * Utility routine to load the DMC device registers.
460: */
461: void
462: dmcload(struct dmc_softc *sc, int type, u_short w0, u_short w1)
463: {
464: struct dmc_command *qp;
465: int sps;
466:
467: sps = splnet();
468:
469: /* grab a command buffer from the free list */
470: if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
471: panic("dmc command queue overflow");
472: DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);
473:
474: /* fill in requested info */
475: qp->qp_cmd = (type | DMC_RQI);
476: qp->qp_ubaddr = w0;
477: qp->qp_cc = w1;
1.9 simonb 478:
1.1 ragge 479: if (sc->sc_qactive) { /* command in progress */
480: if (type == DMC_READ) {
481: QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
482: } else {
483: QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
484: }
485: } else { /* command port free */
486: sc->sc_qactive = qp;
487: DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
488: dmcrint(sc);
489: }
490: splx(sps);
491: }
492:
493: /*
494: * DMC interface receiver interrupt.
495: * Ready to accept another command,
496: * pull one off the command queue.
497: */
498: void
499: dmcrint(void *arg)
500: {
501: struct dmc_softc *sc = arg;
502: struct dmc_command *qp;
503: int n;
504:
505: if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
1.21 chs 506: printf("%s: dmcrint no command\n", device_xname(sc->sc_dev));
1.1 ragge 507: return;
508: }
509: while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
510: DMC_WWORD(DMC_SEL4, qp->qp_ubaddr);
511: DMC_WWORD(DMC_SEL6, qp->qp_cc);
512: DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & ~(DMC_IEI|DMC_RQI));
513: /* free command buffer */
514: QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
515: while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
516: /*
517: * Can't check for RDYO here 'cause
518: * this routine isn't reentrant!
519: */
520: DELAY(5);
521: }
522: /* move on to next command */
523: if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
524: break; /* all done */
525: /* more commands to do, start the next one */
526: qp = sc->sc_qactive;
527: DEQUEUE(sc->sc_qhead, sc->sc_qtail);
528: DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
529: n = RDYSCAN;
530: while (n-- > 0)
531: if ((DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) ||
532: (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO))
533: break;
534: }
535: if (sc->sc_qactive) {
536: DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
537: /* VMS does it twice !*$%@# */
538: DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
539: }
540:
541: }
542:
543: /*
544: * DMC interface transmitter interrupt.
545: * A transfer may have completed, check for errors.
546: * If it was a read, notify appropriate protocol.
547: * If it was a write, pull the next one off the queue.
548: */
549: void
550: dmcxint(void *a)
551: {
1.9 simonb 552: struct dmc_softc *sc = a;
1.1 ragge 553:
554: struct ifnet *ifp;
555: struct mbuf *m;
556: struct ifqueue *inq;
557: int arg, pkaddr, cmd, len, s;
558: struct ifrw *ifrw;
559: struct dmcbufs *rp;
560: struct ifxmt *ifxp;
561: struct dmc_header *dh;
562: char buf[64];
1.21.2.1! rmind 563: int isr = 0;
1.1 ragge 564:
565: ifp = &sc->sc_if;
566:
567: while (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO) {
568:
569: cmd = DMC_RBYTE(DMC_BSEL2) & 0xff;
570: arg = DMC_RWORD(DMC_SEL6) & 0xffff;
571: /* reconstruct UNIBUS address of buffer returned to us */
572: pkaddr = ((arg&DMC_XMEM)<<2) | (DMC_RWORD(DMC_SEL4) & 0xffff);
573: /* release port */
574: DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) & ~DMC_RDYO);
575: switch (cmd & 07) {
576:
577: case DMC_OUR:
578: /*
1.9 simonb 579: * A read has completed.
1.1 ragge 580: * Pass packet to type specific
581: * higher-level input routine.
582: */
583: ifp->if_ipackets++;
584: /* find location in dmcuba struct */
585: ifrw= &sc->sc_ifr[0];
586: for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
587: if(rp->ubinfo == pkaddr)
588: break;
589: ifrw++;
590: }
591: if (rp >= &sc->sc_rbufs[NRCV])
592: panic("dmc rcv");
593: if ((rp->flags & DBUF_DMCS) == 0)
1.21 chs 594: aprint_error_dev(sc->sc_dev, "done unalloc rbuf\n");
1.1 ragge 595:
596: len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
597: if (len < 0 || len > DMCMTU) {
598: ifp->if_ierrors++;
599: #ifdef DMCDEBUG
600: printd("%s: bad rcv pkt addr 0x%x len 0x%x\n",
1.21 chs 601: device_xname(sc->sc_dev), pkaddr, len);
1.1 ragge 602: #endif
603: goto setup;
604: }
605: /*
606: * Deal with trailer protocol: if type is trailer
607: * get true type from first 16-bit word past data.
608: * Remember that type was trailer by setting off.
609: */
610: dh = (struct dmc_header *)ifrw->ifrw_addr;
611: dh->dmc_type = ntohs((u_short)dh->dmc_type);
612: if (len == 0)
613: goto setup;
614:
615: /*
616: * Pull packet off interface. Off is nonzero if
617: * packet has trailing header; dmc_get will then
618: * force this header information to be at the front,
619: * but we still have to drop the type and length
620: * which are at the front of any trailer data.
621: */
622: m = if_ubaget(&sc->sc_ifuba, ifrw, ifp, len);
623: if (m == 0)
624: goto setup;
625: /* Shave off dmc_header */
626: m_adj(m, sizeof(struct dmc_header));
627: switch (dh->dmc_type) {
628:
629: #ifdef INET
630: case DMC_IPTYPE:
1.21.2.1! rmind 631: isr = NETISR_IP;
1.1 ragge 632: inq = &ipintrq;
633: break;
634: #endif
635: default:
636: m_freem(m);
637: goto setup;
638: }
639:
640: s = splnet();
641: if (IF_QFULL(inq)) {
642: IF_DROP(inq);
643: m_freem(m);
1.21.2.1! rmind 644: } else {
1.1 ragge 645: IF_ENQUEUE(inq, m);
1.21.2.1! rmind 646: schednetisr(isr);
! 647: }
1.1 ragge 648: splx(s);
649:
650: setup:
651: /* is this needed? */
652: rp->ubinfo = ifrw->ifrw_info;
653:
1.9 simonb 654: dmcload(sc, DMC_READ, rp->ubinfo,
1.1 ragge 655: ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
656: break;
657:
658: case DMC_OUX:
659: /*
660: * A write has completed, start another
661: * transfer if there is more data to send.
662: */
663: ifp->if_opackets++;
664: /* find associated dmcbuf structure */
665: ifxp = &sc->sc_ifw[0];
666: for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
667: if(rp->ubinfo == pkaddr)
668: break;
669: ifxp++;
670: }
671: if (rp >= &sc->sc_xbufs[NXMT]) {
1.21 chs 672: aprint_error_dev(sc->sc_dev, "bad packet address 0x%x\n",
1.16 cegger 673: pkaddr);
1.1 ragge 674: break;
675: }
676: if ((rp->flags & DBUF_DMCS) == 0)
1.21 chs 677: aprint_error_dev(sc->sc_dev, "unallocated packet 0x%x\n",
1.16 cegger 678: pkaddr);
1.1 ragge 679: /* mark buffer free */
680: if_ubaend(&sc->sc_ifuba, ifxp);
681: rp->flags &= ~DBUF_DMCS;
682: if (--sc->sc_oused == 0)
683: sc->sc_if.if_timer = 0;
684: else
685: sc->sc_if.if_timer = dmc_timeout;
686: if ((sc->sc_flag & DMC_ONLINE) == 0) {
687: extern int ifqmaxlen;
688:
689: /*
690: * We're on the air.
691: * Open the queue to the usual value.
692: */
693: sc->sc_flag |= DMC_ONLINE;
694: ifp->if_snd.ifq_maxlen = ifqmaxlen;
695: }
696: break;
697:
698: case DMC_CNTLO:
699: arg &= DMC_CNTMASK;
700: if (arg & DMC_FATAL) {
701: if (arg != DMC_START) {
1.18 christos 702: snprintb(buf, sizeof(buf), CNTLO_BITS,
703: arg);
1.1 ragge 704: log(LOG_ERR,
705: "%s: fatal error, flags=%s\n",
1.21 chs 706: device_xname(sc->sc_dev), buf);
1.1 ragge 707: }
708: dmcrestart(sc);
709: break;
710: }
711: /* ACCUMULATE STATISTICS */
712: switch(arg) {
713: case DMC_NOBUFS:
714: ifp->if_ierrors++;
715: if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
716: goto report;
717: break;
718: case DMC_DISCONN:
719: if ((sc->sc_disc++ % DMC_RPDSC) == 0)
720: goto report;
721: break;
722: case DMC_TIMEOUT:
723: if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
724: goto report;
725: break;
726: case DMC_DATACK:
727: ifp->if_oerrors++;
728: if ((sc->sc_datck++ % DMC_RPDCK) == 0)
729: goto report;
730: break;
731: default:
732: goto report;
733: }
734: break;
735: report:
736: #ifdef DMCDEBUG
1.18 christos 737: snprintb(buf, sizeof(buf), CNTLO_BITS, arg);
1.1 ragge 738: printd("%s: soft error, flags=%s\n",
1.21 chs 739: device_xname(sc->sc_dev), buf);
1.1 ragge 740: #endif
741: if ((sc->sc_flag & DMC_RESTART) == 0) {
742: /*
743: * kill off the dmc to get things
744: * going again by generating a
745: * procedure error
746: */
747: sc->sc_flag |= DMC_RESTART;
748: arg = sc->sc_ui.ui_baddr;
749: dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
750: }
751: break;
752:
753: default:
1.9 simonb 754: printf("%s: bad control %o\n",
1.21 chs 755: device_xname(sc->sc_dev), cmd);
1.1 ragge 756: break;
757: }
758: }
759: dmcstart(ifp);
760: }
761:
762: /*
763: * DMC output routine.
764: * Encapsulate a packet of type family for the dmc.
765: * Use trailer local net encapsulation if enough data in first
766: * packet leaves a multiple of 512 bytes of data in remainder.
767: */
768: int
769: dmcoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
770: struct rtentry *rt)
771: {
772: int type, error, s;
773: struct mbuf *m = m0;
774: struct dmc_header *dh;
1.3 itojun 775: ALTQ_DECL(struct altq_pktattr pktattr;)
1.1 ragge 776:
777: if ((ifp->if_flags & IFF_UP) == 0) {
778: error = ENETDOWN;
779: goto bad;
780: }
781:
1.3 itojun 782: IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);
783:
1.1 ragge 784: switch (dst->sa_family) {
785: #ifdef INET
786: case AF_INET:
787: type = DMC_IPTYPE;
788: break;
789: #endif
790:
791: case AF_UNSPEC:
792: dh = (struct dmc_header *)dst->sa_data;
793: type = dh->dmc_type;
794: break;
795:
796: default:
797: printf("%s: can't handle af%d\n", ifp->if_xname,
798: dst->sa_family);
799: error = EAFNOSUPPORT;
800: goto bad;
801: }
802:
803: /*
804: * Add local network header
805: * (there is space for a uba on a vax to step on)
806: */
807: M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
808: if (m == 0) {
809: error = ENOBUFS;
810: goto bad;
811: }
812: dh = mtod(m, struct dmc_header *);
813: dh->dmc_type = htons((u_short)type);
814:
815: /*
816: * Queue message on interface, and start output if interface
817: * not yet active.
818: */
819: s = splnet();
1.3 itojun 820: IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
821: if (error) {
822: /* mbuf is already freed */
1.1 ragge 823: splx(s);
1.3 itojun 824: return (error);
1.1 ragge 825: }
826: dmcstart(ifp);
827: splx(s);
828: return (0);
829:
830: bad:
831: m_freem(m0);
832: return (error);
833: }
834:
835:
836: /*
837: * Process an ioctl request.
838: */
839: /* ARGSUSED */
840: int
1.14 christos 841: dmcioctl(struct ifnet *ifp, u_long cmd, void *data)
1.1 ragge 842: {
843: int s = splnet(), error = 0;
844: register struct dmc_softc *sc = ifp->if_softc;
845:
846: switch (cmd) {
847:
1.17 dyoung 848: case SIOCINITIFADDR:
1.1 ragge 849: ifp->if_flags |= IFF_UP;
850: if ((ifp->if_flags & IFF_RUNNING) == 0)
1.9 simonb 851: dmcinit(ifp);
1.1 ragge 852: break;
853:
854: case SIOCSIFDSTADDR:
855: if ((ifp->if_flags & IFF_RUNNING) == 0)
1.9 simonb 856: dmcinit(ifp);
1.1 ragge 857: break;
1.9 simonb 858:
1.1 ragge 859: case SIOCSIFFLAGS:
1.17 dyoung 860: if ((error = ifioctl_common(ifp, cmd, data)) != 0)
861: break;
1.1 ragge 862: if ((ifp->if_flags & IFF_UP) == 0 &&
863: sc->sc_flag & DMC_RUNNING)
864: dmcdown(sc);
865: else if (ifp->if_flags & IFF_UP &&
866: (sc->sc_flag & DMC_RUNNING) == 0)
867: dmcrestart(sc);
868: break;
869:
870: default:
1.17 dyoung 871: error = ifioctl_common(ifp, cmd, data);
1.1 ragge 872: }
873: splx(s);
874: return (error);
875: }
876:
877: /*
878: * Restart after a fatal error.
879: * Clear device and reinitialize.
880: */
881: void
882: dmcrestart(struct dmc_softc *sc)
883: {
884: int s, i;
1.9 simonb 885:
1.1 ragge 886: #ifdef DMCDEBUG
887: /* dump base table */
1.21 chs 888: printf("%s base table:\n", device_xname(sc->sc_dev));
1.1 ragge 889: for (i = 0; i < sizeof (struct dmc_base); i++)
890: printf("%o\n" ,dmc_base[unit].d_base[i]);
891: #endif
892:
893: dmcdown(sc);
894:
895: /*
896: * Let the DMR finish the MCLR. At 1 Mbit, it should do so
897: * in about a max of 6.4 milliseconds with diagnostics enabled.
898: */
899: for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
900: ;
901: /* Did the timer expire or did the DMR finish? */
902: if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
1.21 chs 903: log(LOG_ERR, "%s: M820 Test Failed\n", device_xname(sc->sc_dev));
1.1 ragge 904: return;
905: }
906:
907: /* restart DMC */
908: dmcinit(&sc->sc_if);
909: sc->sc_flag &= ~DMC_RESTART;
910: s = splnet();
911: dmcstart(&sc->sc_if);
912: splx(s);
913: sc->sc_if.if_collisions++; /* why not? */
914: }
915:
916: /*
917: * Reset a device and mark down.
918: * Flush output queue and drop queue limit.
919: */
920: void
921: dmcdown(struct dmc_softc *sc)
922: {
923: struct ifxmt *ifxp;
924:
925: DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
926: sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);
927:
928: for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
929: #ifdef notyet
930: if (ifxp->ifw_xtofree) {
931: (void) m_freem(ifxp->ifw_xtofree);
932: ifxp->ifw_xtofree = 0;
933: }
934: #endif
935: }
936: IF_PURGE(&sc->sc_if.if_snd);
937: }
938:
939: /*
940: * Watchdog timeout to see that transmitted packets don't
941: * lose interrupts. The device has to be online (the first
942: * transmission may block until the other side comes up).
943: */
944: void
945: dmctimeout(struct ifnet *ifp)
946: {
947: struct dmc_softc *sc = ifp->if_softc;
948: char buf1[64], buf2[64];
949:
950: if (sc->sc_flag & DMC_ONLINE) {
1.18 christos 951: snprintb(buf1, sizeof(buf1), DMC0BITS,
952: DMC_RBYTE(DMC_BSEL0) & 0xff);
953: snprintb(buf2, sizeof(buf2), DMC2BITS,
954: DMC_RBYTE(DMC_BSEL2) & 0xff);
1.1 ragge 955: log(LOG_ERR, "%s: output timeout, bsel0=%s bsel2=%s\n",
1.21 chs 956: device_xname(sc->sc_dev), buf1, buf2);
1.1 ragge 957: dmcrestart(sc);
958: }
959: }
CVSweb <webmaster@jp.NetBSD.org>