Annotation of src/sys/net/lagg/if_lagg.c, Revision 1.41
1.41 ! yamaguch 1: /* $NetBSD: if_lagg.c,v 1.40 2022/03/31 03:10:59 yamaguchi Exp $ */
1.3 yamaguch 2:
3: /*
4: * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
5: * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
6: * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
7: * Copyright (c) 2021, Internet Initiative Japan Inc.
8: *
9: * Permission to use, copy, modify, and distribute this software for any
10: * purpose with or without fee is hereby granted, provided that the above
11: * copyright notice and this permission notice appear in all copies.
12: *
13: * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14: * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15: * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16: * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17: * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18: * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19: * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20: */
1.1 yamaguch 21:
1.4 thorpej 22: #include <sys/cdefs.h>
1.41 ! yamaguch 23: __KERNEL_RCSID(0, "$NetBSD: if_lagg.c,v 1.40 2022/03/31 03:10:59 yamaguchi Exp $");
1.4 thorpej 24:
1.1 yamaguch 25: #ifdef _KERNEL_OPT
26: #include "opt_inet.h"
27: #include "opt_lagg.h"
28: #endif
29:
30: #include <sys/param.h>
31: #include <sys/types.h>
32:
33: #include <sys/cprng.h>
1.19 yamaguch 34: #include <sys/cpu.h>
1.1 yamaguch 35: #include <sys/device.h>
36: #include <sys/evcnt.h>
37: #include <sys/hash.h>
38: #include <sys/kmem.h>
39: #include <sys/module.h>
40: #include <sys/pserialize.h>
41: #include <sys/pslist.h>
42: #include <sys/psref.h>
43: #include <sys/sysctl.h>
44: #include <sys/syslog.h>
45: #include <sys/workqueue.h>
46:
47: #include <net/bpf.h>
48: #include <net/if.h>
49: #include <net/if_dl.h>
50: #include <net/if_ether.h>
51: #include <net/if_media.h>
52: #include <net/if_types.h>
53: #include <net/if_vlanvar.h>
54: #include <netinet/ip.h>
55: #include <netinet/ip6.h>
56: #include <netinet/tcp.h>
57: #include <netinet/udp.h>
58:
59: #if defined(INET) || defined(INET6)
60: #include <netinet/in.h>
61: #endif
62:
63: #ifdef INET6
64: #include <netinet6/in6_ifattach.h>
65: #include <netinet6/in6_var.h>
66: #endif
67:
68: #include <net/lagg/if_lagg.h>
69: #include <net/lagg/if_laggvar.h>
70: #include <net/lagg/if_laggproto.h>
71:
72: #include "ioconf.h"
73:
/*
 * Per-port lifecycle operations: allocate, free, start, and stop a
 * member port within the aggregation.
 */
enum lagg_portctrl {
	LAGG_PORTCTRL_ALLOC,
	LAGG_PORTCTRL_FREE,
	LAGG_PORTCTRL_START,
	LAGG_PORTCTRL_STOP
};

/* Interface types a lagg(4) instance can present; only Ethernet so far. */
enum lagg_iftypes {
	LAGG_IF_TYPE_ETHERNET,
};
84:
/*
 * Method table for each aggregation protocol, indexed by protocol
 * number.  Callbacks left NULL are optional; the lagg_proto_*()
 * dispatchers check for NULL before invoking them.
 */
static const struct lagg_proto lagg_protos[] = {
	[LAGG_PROTO_NONE] = {
		.pr_num = LAGG_PROTO_NONE,
		.pr_attach = lagg_none_attach,
	},
	[LAGG_PROTO_LACP] = {
		.pr_num = LAGG_PROTO_LACP,
		.pr_attach = lacp_attach,
		.pr_detach = lacp_detach,
		.pr_up = lacp_up,
		.pr_down = lacp_down,
		.pr_transmit = lacp_transmit,
		.pr_input = lacp_input,
		.pr_allocport = lacp_allocport,
		.pr_freeport = lacp_freeport,
		.pr_startport = lacp_startport,
		.pr_stopport = lacp_stopport,
		.pr_protostat = lacp_protostat,
		.pr_portstat = lacp_portstat,
		.pr_linkstate = lacp_linkstate_ifnet_locked,
		.pr_ioctl = lacp_ioctl,
	},
	[LAGG_PROTO_FAILOVER] = {
		.pr_num = LAGG_PROTO_FAILOVER,
		.pr_attach = lagg_fail_attach,
		.pr_detach = lagg_common_detach,
		.pr_transmit = lagg_fail_transmit,
		.pr_input = lagg_fail_input,
		.pr_allocport = lagg_common_allocport,
		.pr_freeport = lagg_common_freeport,
		.pr_startport = lagg_common_startport,
		.pr_stopport = lagg_common_stopport,
		.pr_portstat = lagg_fail_portstat,
		.pr_linkstate = lagg_common_linkstate,
		.pr_ioctl = lagg_fail_ioctl,
	},
	[LAGG_PROTO_LOADBALANCE] = {
		.pr_num = LAGG_PROTO_LOADBALANCE,
		.pr_attach = lagg_lb_attach,
		.pr_detach = lagg_common_detach,
		.pr_transmit = lagg_lb_transmit,
		.pr_input = lagg_lb_input,
		.pr_allocport = lagg_common_allocport,
		.pr_freeport = lagg_common_freeport,
		.pr_startport = lagg_lb_startport,
		.pr_stopport = lagg_lb_stopport,
		.pr_portstat = lagg_lb_portstat,
		.pr_linkstate = lagg_common_linkstate,
	},
};
135:
1.19 yamaguch 136: static int lagg_chg_sadl(struct ifnet *, uint8_t *, size_t);
1.1 yamaguch 137: static struct mbuf *
138: lagg_input_ethernet(struct ifnet *, struct mbuf *);
139: static int lagg_clone_create(struct if_clone *, int);
140: static int lagg_clone_destroy(struct ifnet *);
141: static int lagg_init(struct ifnet *);
142: static int lagg_init_locked(struct lagg_softc *);
143: static void lagg_stop(struct ifnet *, int);
144: static void lagg_stop_locked(struct lagg_softc *);
145: static int lagg_ioctl(struct ifnet *, u_long, void *);
146: static int lagg_transmit(struct ifnet *, struct mbuf *);
147: static void lagg_start(struct ifnet *);
148: static int lagg_media_change(struct ifnet *);
149: static void lagg_media_status(struct ifnet *, struct ifmediareq *);
150: static int lagg_vlan_cb(struct ethercom *, uint16_t, bool);
1.7 yamaguch 151: static void lagg_linkstate_changed(void *);
1.10 yamaguch 152: static void lagg_ifdetach(void *);
1.1 yamaguch 153: static struct lagg_softc *
154: lagg_softc_alloc(enum lagg_iftypes);
155: static void lagg_softc_free(struct lagg_softc *);
156: static int lagg_setup_sysctls(struct lagg_softc *);
157: static void lagg_teardown_sysctls(struct lagg_softc *);
158: static int lagg_proto_attach(struct lagg_softc *, lagg_proto,
159: struct lagg_proto_softc **);
160: static void lagg_proto_detach(struct lagg_variant *);
161: static int lagg_proto_up(struct lagg_softc *);
162: static void lagg_proto_down(struct lagg_softc *);
163: static int lagg_proto_allocport(struct lagg_softc *, struct lagg_port *);
164: static void lagg_proto_freeport(struct lagg_softc *, struct lagg_port *);
165: static void lagg_proto_startport(struct lagg_softc *,
166: struct lagg_port *);
167: static void lagg_proto_stopport(struct lagg_softc *,
168: struct lagg_port *);
169: static struct mbuf *
170: lagg_proto_input(struct lagg_softc *, struct lagg_port *,
171: struct mbuf *);
172: static void lagg_proto_linkstate(struct lagg_softc *, struct lagg_port *);
173: static int lagg_proto_ioctl(struct lagg_softc *, struct lagg_req *);
174: static int lagg_get_stats(struct lagg_softc *, struct lagg_req *, size_t);
175: static int lagg_pr_attach(struct lagg_softc *, lagg_proto);
176: static void lagg_pr_detach(struct lagg_softc *);
177: static int lagg_addport(struct lagg_softc *, struct ifnet *);
178: static int lagg_delport(struct lagg_softc *, struct ifnet *);
1.10 yamaguch 179: static int lagg_delport_all(struct lagg_softc *);
1.1 yamaguch 180: static int lagg_port_ioctl(struct ifnet *, u_long, void *);
181: static int lagg_port_output(struct ifnet *, struct mbuf *,
182: const struct sockaddr *, const struct rtentry *);
1.40 yamaguch 183: static void lagg_config_promisc(struct lagg_softc *, struct lagg_port *);
1.13 yamaguch 184: static void lagg_unconfig_promisc(struct lagg_softc *, struct lagg_port *);
1.1 yamaguch 185: static struct lagg_variant *
186: lagg_variant_getref(struct lagg_softc *, struct psref *);
187: static void lagg_variant_putref(struct lagg_variant *, struct psref *);
188: static int lagg_ether_addmulti(struct lagg_softc *, struct ifreq *);
189: static int lagg_ether_delmulti(struct lagg_softc *, struct ifreq *);
190: static void lagg_port_syncmulti(struct lagg_softc *, struct lagg_port *);
191: static void lagg_port_purgemulti(struct lagg_softc *, struct lagg_port *);
1.9 yamaguch 192: static int lagg_port_setup(struct lagg_softc *, struct lagg_port *,
193: struct ifnet *);
194: static void lagg_port_teardown(struct lagg_softc *, struct lagg_port *,
195: bool);
1.1 yamaguch 196: static void lagg_port_syncvlan(struct lagg_softc *, struct lagg_port *);
197: static void lagg_port_purgevlan(struct lagg_softc *, struct lagg_port *);
1.9 yamaguch 198: static void lagg_lladdr_update(struct lagg_softc *);
1.12 yamaguch 199: static void lagg_capabilities_update(struct lagg_softc *);
1.22 yamaguch 200: static void lagg_sync_ifcaps(struct lagg_softc *);
201: static void lagg_sync_ethcaps(struct lagg_softc *);
1.1 yamaguch 202:
203: static struct if_clone lagg_cloner =
204: IF_CLONE_INITIALIZER("lagg", lagg_clone_create, lagg_clone_destroy);
205: static unsigned int lagg_count;
206: static struct psref_class
207: *lagg_psref_class __read_mostly;
208: static struct psref_class
209: *lagg_port_psref_class __read_mostly;
210:
211: static enum lagg_iftypes
212: lagg_iftype = LAGG_IF_TYPE_ETHERNET;
213:
214: #ifdef LAGG_DEBUG
215: #define LAGG_DPRINTF(_sc, _fmt, _args...) do { \
216: printf("%s: " _fmt, (_sc) != NULL ? \
1.31 yamaguch 217: (_sc)->sc_if.if_xname : "lagg", ##_args); \
1.1 yamaguch 218: } while (0)
219: #else
220: #define LAGG_DPRINTF(_sc, _fmt, _args...) __nothing
221: #endif
222:
1.12 yamaguch 223: #ifndef LAGG_SETCAPS_RETRY
224: #define LAGG_SETCAPS_RETRY (LAGG_MAX_PORTS * 2)
225: #endif
226:
1.9 yamaguch 227: static size_t
1.1 yamaguch 228: lagg_sizeof_softc(enum lagg_iftypes ift)
229: {
230: struct lagg_softc *_dummy = NULL;
231: size_t s;
232:
233: s = sizeof(*_dummy) - sizeof(_dummy->sc_if);
234:
235: switch (ift) {
236: case LAGG_IF_TYPE_ETHERNET:
237: s += sizeof(struct ethercom);
238: break;
239: default:
240: s += sizeof(struct ifnet);
241: break;
242: }
243:
244: return s;
245: }
246:
1.9 yamaguch 247: static bool
1.1 yamaguch 248: lagg_debug_enable(struct lagg_softc *sc)
249: {
250: if (__predict_false(ISSET(sc->sc_if.if_flags, IFF_DEBUG)))
251: return true;
252:
253: return false;
254: }
255:
1.9 yamaguch 256: static void
1.1 yamaguch 257: lagg_evcnt_attach(struct lagg_softc *sc,
258: struct evcnt *ev, const char *name)
259: {
260:
261: evcnt_attach_dynamic(ev, EVCNT_TYPE_MISC, NULL,
262: sc->sc_evgroup, name);
263: }
264:
1.9 yamaguch 265: static void
1.1 yamaguch 266: lagg_in6_ifattach(struct ifnet *ifp)
267: {
268:
269: #ifdef INET6
270: KERNEL_LOCK_UNLESS_NET_MPSAFE();
271: if (in6_present) {
272: if (ISSET(ifp->if_flags, IFF_UP))
273: in6_ifattach(ifp, NULL);
274: }
275: KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
276: #endif
277: }
278:
1.9 yamaguch 279: static void
1.1 yamaguch 280: lagg_in6_ifdetach(struct ifnet *ifp)
281: {
282:
283: #ifdef INET6
284: KERNEL_LOCK_UNLESS_NET_MPSAFE();
1.38 yamaguch 285: if (in6_present)
1.1 yamaguch 286: in6_ifdetach(ifp);
287: KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
288: #endif
289: }
290:
1.9 yamaguch 291: static int
1.1 yamaguch 292: lagg_lp_ioctl(struct lagg_port *lp, u_long cmd, void *data)
293: {
294: struct ifnet *ifp_port;
295: int error;
296:
297: if (lp->lp_ioctl == NULL)
298: return EINVAL;
299:
300: ifp_port = lp->lp_ifp;
301: IFNET_LOCK(ifp_port);
302: error = lp->lp_ioctl(ifp_port, cmd, data);
303: IFNET_UNLOCK(ifp_port);
304:
305: return error;
306: }
307:
1.9 yamaguch 308: static bool
309: lagg_lladdr_equal(const uint8_t *a, const uint8_t *b)
310: {
311:
312: if (memcmp(a, b, ETHER_ADDR_LEN) == 0)
313: return true;
314:
315: return false;
316: }
317:
/* Copy a link-layer (Ethernet) address from src to dst. */
static void
lagg_lladdr_cpy(uint8_t *dst, const uint8_t *src)
{

	memcpy(dst, src, ETHER_ADDR_LEN);
}
324:
1.1 yamaguch 325: void
326: laggattach(int n)
327: {
328:
329: /*
330: * Nothing to do here, initialization is handled by the
331: * module initialization code in lagginit() below).
332: */
333: }
334:
335: static void
336: lagginit(void)
337: {
338: size_t i;
339:
340: lagg_psref_class = psref_class_create("laggvariant", IPL_SOFTNET);
341: lagg_port_psref_class = psref_class_create("laggport", IPL_SOFTNET);
342:
343: for (i = 0; i < LAGG_PROTO_MAX; i++) {
344: if (lagg_protos[i].pr_init != NULL)
345: lagg_protos[i].pr_init();
346: }
347:
348: lagg_input_ethernet_p = lagg_input_ethernet;
349: if_clone_attach(&lagg_cloner);
350: }
351:
/*
 * Module teardown, mirroring lagginit() in reverse order.
 * Fails with EBUSY while any lagg interface still exists.
 */
static int
laggdetach(void)
{
	size_t i;

	if (lagg_count > 0)
		return EBUSY;

	/* Stop new clones first, then unhook the input path. */
	if_clone_detach(&lagg_cloner);
	lagg_input_ethernet_p = NULL;

	for (i = 0; i < LAGG_PROTO_MAX; i++) {
		if (lagg_protos[i].pr_fini != NULL)
			lagg_protos[i].pr_fini();
	}

	psref_class_destroy(lagg_port_psref_class);
	psref_class_destroy(lagg_psref_class);

	return 0;
}
373:
374: static int
375: lagg_clone_create(struct if_clone *ifc, int unit)
376: {
377: struct lagg_softc *sc;
378: struct ifnet *ifp;
379: int error;
380:
381: sc = lagg_softc_alloc(lagg_iftype);
382: ifp = &sc->sc_if;
383:
384: mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTNET);
385: sc->sc_psz = pserialize_create();
386: SIMPLEQ_INIT(&sc->sc_ports);
387: LIST_INIT(&sc->sc_mclist);
388: TAILQ_INIT(&sc->sc_vtags);
389: sc->sc_hash_mac = true;
390: sc->sc_hash_ipaddr = true;
391: sc->sc_hash_ip6addr = true;
392: sc->sc_hash_tcp = true;
393: sc->sc_hash_udp = true;
394:
395: if_initname(ifp, ifc->ifc_name, unit);
396: ifp->if_softc = sc;
397: ifp->if_init = lagg_init;
398: ifp->if_stop = lagg_stop;
399: ifp->if_ioctl = lagg_ioctl;
400: ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
401: ifp->if_extflags = IFEF_MPSAFE;
402: ifp->if_transmit = lagg_transmit;
403: ifp->if_start = lagg_start;
1.6 ozaki-r 404: IFQ_SET_READY(&ifp->if_snd);
1.1 yamaguch 405:
406: error = lagg_setup_sysctls(sc);
407: if (error != 0)
408: goto destroy_psz;
409:
410: /*XXX dependent on ethernet */
411: ifmedia_init_with_lock(&sc->sc_media, 0, lagg_media_change,
412: lagg_media_status, &sc->sc_lock);
413: ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
414: ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
415:
1.5 riastrad 416: if_initialize(ifp);
1.1 yamaguch 417:
418: switch (lagg_iftype) {
419: case LAGG_IF_TYPE_ETHERNET:
1.9 yamaguch 420: cprng_fast(sc->sc_lladdr_rand, sizeof(sc->sc_lladdr_rand));
1.14 yamaguch 421: sc->sc_lladdr_rand[0] &= 0xFE; /* clear I/G bit */
422: sc->sc_lladdr_rand[0] |= 0x02; /* set G/L bit */
1.9 yamaguch 423: lagg_lladdr_cpy(sc->sc_lladdr, sc->sc_lladdr_rand);
1.1 yamaguch 424: ether_set_vlan_cb((struct ethercom *)ifp, lagg_vlan_cb);
425: ether_ifattach(ifp, sc->sc_lladdr);
426: break;
427: default:
428: panic("unknown if type");
429: }
430:
431: snprintf(sc->sc_evgroup, sizeof(sc->sc_evgroup),
432: "%s", ifp->if_xname);
433: lagg_evcnt_attach(sc, &sc->sc_novar, "no lagg variant");
434: if_link_state_change(&sc->sc_if, LINK_STATE_DOWN);
435: lagg_setup_sysctls(sc);
436: (void)lagg_pr_attach(sc, LAGG_PROTO_NONE);
437: if_register(ifp);
438: lagg_count++;
439:
440: return 0;
441:
442: destroy_psz:
443: pserialize_destroy(sc->sc_psz);
444: mutex_destroy(&sc->sc_lock);
445: lagg_softc_free(sc);
446:
447: return error;
448: }
449:
/*
 * Destroy a lagg(4) instance: stop it, tear down every member port
 * under the softc lock, detach the interface, and release all
 * resources created in lagg_clone_create() in reverse order.
 */
static int
lagg_clone_destroy(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	lagg_stop(ifp, 1);

	/* Detach all member ports before the interface itself goes away. */
	LAGG_LOCK(sc);
	while ((lp = LAGG_PORTS_FIRST(sc)) != NULL) {
		lagg_port_teardown(sc, lp, false);
	}
	LAGG_UNLOCK(sc);

	switch (ifp->if_type) {
	case IFT_ETHER:
		ether_ifdetach(ifp);
		/* all VLAN tags must have been released by the ports */
		KASSERT(TAILQ_EMPTY(&sc->sc_vtags));
		break;
	}

	if_detach(ifp);
	ifmedia_fini(&sc->sc_media);
	lagg_pr_detach(sc);
	evcnt_detach(&sc->sc_novar);
	lagg_teardown_sysctls(sc);

	pserialize_destroy(sc->sc_psz);
	mutex_destroy(&sc->sc_lock);
	lagg_softc_free(sc);

	if (lagg_count > 0)
		lagg_count--;

	return 0;
}
486:
487: static int
488: lagg_init(struct ifnet *ifp)
489: {
490: struct lagg_softc *sc;
491: int rv;
492:
493: sc = ifp->if_softc;
494: LAGG_LOCK(sc);
495: rv = lagg_init_locked(sc);
496: LAGG_UNLOCK(sc);
497:
498: return rv;
499: }
500:
/*
 * Bring the interface up with the softc lock held: refresh the
 * link-layer address, mark RUNNING, then start the protocol.  If the
 * protocol fails to come up, the interface is stopped again.
 */
static int
lagg_init_locked(struct lagg_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	int rv;

	KASSERT(LAGG_LOCKED(sc));

	/* restart from a clean state if already running */
	if (ISSET(ifp->if_flags, IFF_RUNNING))
		lagg_stop_locked(sc);

	lagg_lladdr_update(sc);

	SET(ifp->if_flags, IFF_RUNNING);

	rv = lagg_proto_up(sc);
	if (rv != 0)
		lagg_stop_locked(sc);

	return rv;
}
522:
523: static void
524: lagg_stop(struct ifnet *ifp, int disable __unused)
525: {
526: struct lagg_softc *sc;
527:
528: sc = ifp->if_softc;
529: LAGG_LOCK(sc);
530: lagg_stop_locked(sc);
531: LAGG_UNLOCK(sc);
532: }
533:
/*
 * Take the interface down with the softc lock held: clear RUNNING
 * first so the transmit path stops, then bring the protocol down.
 * No-op if the interface is not running.
 */
static void
lagg_stop_locked(struct lagg_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	KASSERT(LAGG_LOCKED(sc));

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	CLR(ifp->if_flags, IFF_RUNNING);
	lagg_proto_down(sc);

}
548:
/*
 * Apply a SIOCSLAGG request (lrq_ioctl selects the sub-command):
 * switch protocol (replacing all ports), add/delete a single port,
 * change a port's priority, or pass protocol-specific options through.
 * The LWP is bound for the duration because if_get() hands out psrefs.
 */
static int
lagg_config(struct lagg_softc *sc, struct lagg_req *lrq)
{
	struct ifnet *ifp_port;
	struct laggreqport *rp;
	struct lagg_port *lp;
	struct psref psref;
	size_t i;
	int error, bound;

	error = 0;
	bound = curlwp_bind();

	switch (lrq->lrq_ioctl) {
	case LAGGIOC_SETPROTO:
		if (lrq->lrq_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}

		/* drop every port before the protocol is replaced */
		error = lagg_delport_all(sc);
		if (error != 0)
			break;
		error = lagg_pr_attach(sc, lrq->lrq_proto);
		if (error != 0)
			break;

		/* then re-add the ports listed in the request */
		for (i = 0; i < lrq->lrq_nports; i++) {
			rp = &lrq->lrq_reqports[i];
			ifp_port = if_get(rp->rp_portname, &psref);
			if (ifp_port == NULL) {
				error = ENOENT;
				break; /* break for */
			}

			error = lagg_addport(sc, ifp_port);
			if_put(ifp_port, &psref);

			if (error != 0)
				break; /* break for */
		}
		break; /* break switch */
	case LAGGIOC_ADDPORT:
		rp = &lrq->lrq_reqports[0];
		ifp_port = if_get(rp->rp_portname, &psref);
		if (ifp_port == NULL) {
			error = ENOENT;
			break;
		}

		error = lagg_addport(sc, ifp_port);
		if_put(ifp_port, &psref);
		break;
	case LAGGIOC_DELPORT:
		rp = &lrq->lrq_reqports[0];
		ifp_port = if_get(rp->rp_portname, &psref);
		if (ifp_port == NULL) {
			error = ENOENT;
			break;
		}

		error = lagg_delport(sc, ifp_port);
		if_put(ifp_port, &psref);
		break;
	case LAGGIOC_SETPORTPRI:
		rp = &lrq->lrq_reqports[0];
		ifp_port = if_get(rp->rp_portname, &psref);
		if (ifp_port == NULL) {
			error = ENOENT;
			break;
		}

		/* the named interface must actually be one of our ports */
		lp = ifp_port->if_lagg;
		if (lp == NULL || lp->lp_softc != sc) {
			if_put(ifp_port, &psref);
			error = ENOENT;
			break;
		}

		lp->lp_prio = rp->rp_prio;

		/* restart protocol */
		LAGG_LOCK(sc);
		lagg_proto_stopport(sc, lp);
		lagg_proto_startport(sc, lp);
		LAGG_UNLOCK(sc);
		if_put(ifp_port, &psref);
		break;
	case LAGGIOC_SETPROTOOPT:
		error = lagg_proto_ioctl(sc, lrq);
		break;
	default:
		error = ENOTTY;
	}

	curlwp_bindx(bound);
	return error;
}
647:
/*
 * ioctl handler for the lagg interface itself.  SIOCGLAGG/SIOCSLAGG
 * exchange a variable-length struct lagg_req through ifr_data; flag,
 * MTU and multicast changes are replicated to the member ports; the
 * rest is delegated to ether_ioctl().
 */
static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct lagg_softc *sc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_req laggreq, *laggresp;
	struct lagg_port *lp;
	size_t allocsiz, outlen, nports;
	char *outbuf;
	void *buf;
	int error = 0, rv;

	sc = ifp->if_softc;

	switch (cmd) {
	case SIOCGLAGG:
		/* header copyin tells us how many slots the caller has */
		error = copyin(ifr->ifr_data, &laggreq, sizeof(laggreq));
		if (error != 0)
			break;

		nports = sc->sc_nports;
		nports = MIN(nports, laggreq.lrq_nports);

		allocsiz = sizeof(*laggresp)
		    + sizeof(laggresp->lrq_reqports[0]) * nports;
		laggresp = kmem_zalloc(allocsiz, KM_SLEEP);

		rv = lagg_get_stats(sc, laggresp, nports);

		outbuf = (char *)laggresp;

		/* copy out only the slots actually filled in */
		nports = MIN(laggresp->lrq_nports, nports);
		outlen = sizeof(*laggresp)
		    + sizeof(laggresp->lrq_reqports[0]) * nports;

		error = copyout(outbuf, ifr->ifr_data, outlen);
		kmem_free(outbuf, allocsiz);

		/* report the stats failure unless copyout already failed */
		if (error == 0 && rv != 0)
			error = rv;

		break;
	case SIOCSLAGG:
		error = copyin(ifr->ifr_data, &laggreq, sizeof(laggreq));
		if (error != 0)
			break;

		/*
		 * NOTE(review): lrq_nports comes straight from userland
		 * and sizes this kmem_alloc() with no visible upper
		 * bound -- confirm a cap is enforced elsewhere.
		 */
		nports = laggreq.lrq_nports;
		if (nports > 1) {
			/* re-copy the full request including all ports */
			allocsiz = sizeof(struct lagg_req)
			    + sizeof(struct laggreqport) * nports;
			buf = kmem_alloc(allocsiz, KM_SLEEP);

			error = copyin(ifr->ifr_data, buf, allocsiz);
			if (error != 0) {
				kmem_free(buf, allocsiz);
				break;
			}
		} else {
			/* small request fits in the on-stack copy */
			buf = (void *)&laggreq;
			allocsiz = 0;
		}

		error = lagg_config(sc, buf);
		if (allocsiz > 0)
			kmem_free(buf, allocsiz);
		break;
	case SIOCSIFFLAGS:
		error = ifioctl_common(ifp, cmd, data);
		if (error != 0)
			break;

		/* reconcile RUNNING with the requested UP state */
		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
		case IFF_RUNNING:
			if_stop(ifp, 1);
			break;
		case IFF_UP:
		case IFF_UP | IFF_RUNNING:
			error = if_init(ifp);
			break;
		}

		if (error != 0)
			break;

		/* Set flags on ports too */
		LAGG_LOCK(sc);
		LAGG_PORTS_FOREACH(sc, lp) {
			(void)lagg_config_promisc(sc, lp);
		}
		LAGG_UNLOCK(sc);
		break;
	case SIOCSIFMTU:
		/* try the new MTU on every port; revert all on failure */
		LAGG_LOCK(sc);
		LAGG_PORTS_FOREACH(sc, lp) {
			error = lagg_lp_ioctl(lp, cmd, (void *)ifr);

			if (error != 0) {
				lagg_log(sc, LOG_ERR,
				    "failed to change MTU to %d on port %s, "
				    "reverting all ports to original "
				    "MTU(%" PRIu64 ")\n",
				    ifr->ifr_mtu, lp->lp_ifp->if_xname,
				    ifp->if_mtu);
				break;
			}
		}

		if (error == 0) {
			ifp->if_mtu = ifr->ifr_mtu;
		} else {
			/* set every port back to the original MTU */
			ifr->ifr_mtu = ifp->if_mtu;
			LAGG_PORTS_FOREACH(sc, lp) {
				if (lp->lp_ioctl != NULL)
					lagg_lp_ioctl(lp, cmd, (void *)ifr);
			}
		}
		LAGG_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		if (sc->sc_if.if_type == IFT_ETHER) {
			error = lagg_ether_addmulti(sc, ifr);
		} else {
			error = EPROTONOSUPPORT;
		}
		break;
	case SIOCDELMULTI:
		if (sc->sc_if.if_type == IFT_ETHER) {
			error = lagg_ether_delmulti(sc, ifr);
		} else {
			error = EPROTONOSUPPORT;
		}
		break;
	case SIOCSIFCAP:
		/* propagate capability changes down to the ports */
		error = ether_ioctl(ifp, cmd, data);
		if (error == 0)
			lagg_sync_ifcaps(sc);
		break;
	case SIOCSETHERCAP:
		error = ether_ioctl(ifp, cmd, data);
		if (error == 0)
			lagg_sync_ethcaps(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	return error;
}
797:
/*
 * Create the per-interface sysctl tree: net.<ifname>.hash.* boolean
 * knobs selecting which packet fields lagg_hashmbuf() mixes in.
 * On any failure the partially-built tree is torn down and the error
 * returned.
 */
static int
lagg_setup_sysctls(struct lagg_softc *sc)
{
	struct sysctllog **log;
	const struct sysctlnode **rnode, *hashnode;
	const char *ifname;
	int error;

	log = &sc->sc_sysctllog;
	rnode = &sc->sc_sysctlnode;
	ifname = sc->sc_if.if_xname;

	error = sysctl_createv(log, 0, NULL, rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, ifname,
	    SYSCTL_DESCR("lagg information and settings"),
	    NULL, 0, NULL, 0, CTL_NET, CTL_CREATE, CTL_EOL);
	if (error != 0)
		goto done;

	error = sysctl_createv(log, 0, rnode, &hashnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hash",
	    SYSCTL_DESCR("hash calculation settings"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error != 0)
		goto done;

	error = sysctl_createv(log, 0, &hashnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "macaddr",
	    SYSCTL_DESCR("use src/dst mac addresses"),
	    NULL, 0, &sc->sc_hash_mac, 0, CTL_CREATE, CTL_EOL);
	if (error != 0)
		goto done;

	error = sysctl_createv(log, 0, &hashnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "ipaddr",
	    SYSCTL_DESCR("use src/dst IPv4 addresses"),
	    NULL, 0, &sc->sc_hash_ipaddr, 0, CTL_CREATE, CTL_EOL);
	if (error != 0)
		goto done;

	error = sysctl_createv(log, 0, &hashnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "ip6addr",
	    SYSCTL_DESCR("use src/dst IPv6 addresses"),
	    NULL, 0, &sc->sc_hash_ip6addr, 0, CTL_CREATE, CTL_EOL);
	if (error != 0)
		goto done;

	error = sysctl_createv(log, 0, &hashnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "tcp",
	    SYSCTL_DESCR("use TCP src/dst port"),
	    NULL, 0, &sc->sc_hash_tcp, 0, CTL_CREATE, CTL_EOL);
	if (error != 0)
		goto done;

	error = sysctl_createv(log, 0, &hashnode, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "udp",
	    SYSCTL_DESCR("use UDP src/dst port"),
	    NULL, 0, &sc->sc_hash_udp, 0, CTL_CREATE, CTL_EOL);
done:
	if (error != 0) {
		lagg_log(sc, LOG_ERR, "unable to create sysctl node\n");
		sysctl_teardown(log);
	}

	return error;
}
864:
/* Destroy the sysctl tree created by lagg_setup_sysctls(). */
static void
lagg_teardown_sysctls(struct lagg_softc *sc)
{

	sc->sc_sysctlnode = NULL;
	sysctl_teardown(&sc->sc_sysctllog);
}
872:
873: uint32_t
874: lagg_hashmbuf(struct lagg_softc *sc, struct mbuf *m)
875: {
876: union {
877: struct ether_header _eh;
878: struct ether_vlan_header _evl;
879: struct ip _ip;
880: struct ip6_hdr _ip6;
881: struct tcphdr _th;
882: struct udphdr _uh;
883: } buf;
884: const struct ether_header *eh;
885: const struct ether_vlan_header *evl;
886: const struct ip *ip;
887: const struct ip6_hdr *ip6;
888: const struct tcphdr *th;
889: const struct udphdr *uh;
1.17 yamaguch 890: uint32_t hash, hash_src, hash_dst;
1.1 yamaguch 891: uint32_t flowlabel;
892: uint16_t etype, vlantag;
893: uint8_t proto;
894: size_t off;
895:
896: KASSERT(ISSET(m->m_flags, M_PKTHDR));
897:
1.17 yamaguch 898: hash = HASH32_BUF_INIT;
899: hash_src = HASH32_BUF_INIT;
900: hash_dst = HASH32_BUF_INIT;
901:
902: #define LAGG_HASH_ADD(hp, v) do { \
903: *(hp) = hash32_buf(&(v), sizeof(v), *(hp)); \
904: } while(0)
905:
1.1 yamaguch 906: eh = lagg_m_extract(m, 0, sizeof(*eh), &buf);
1.38 yamaguch 907: if (eh == NULL)
1.17 yamaguch 908: goto out;
1.38 yamaguch 909:
1.1 yamaguch 910: off = ETHER_HDR_LEN;
911: etype = ntohs(eh->ether_type);
912:
913: if (etype == ETHERTYPE_VLAN) {
914: evl = lagg_m_extract(m, 0, sizeof(*evl), &buf);
1.38 yamaguch 915: if (evl == NULL)
1.17 yamaguch 916: goto out;
1.1 yamaguch 917:
918: vlantag = ntohs(evl->evl_tag);
919: etype = ntohs(evl->evl_proto);
920: off += ETHER_VLAN_ENCAP_LEN;
921: } else if (vlan_has_tag(m)) {
922: vlantag = vlan_get_tag(m);
923: } else {
924: vlantag = 0;
925: }
926:
927: if (sc->sc_hash_mac) {
1.17 yamaguch 928: LAGG_HASH_ADD(&hash_dst, eh->ether_dhost);
929: LAGG_HASH_ADD(&hash_src, eh->ether_shost);
930: LAGG_HASH_ADD(&hash, vlantag);
1.1 yamaguch 931: }
932:
933: switch (etype) {
934: case ETHERTYPE_IP:
935: ip = lagg_m_extract(m, off, sizeof(*ip), &buf);
1.38 yamaguch 936: if (ip == NULL)
1.17 yamaguch 937: goto out;
1.1 yamaguch 938:
939: if (sc->sc_hash_ipaddr) {
1.17 yamaguch 940: LAGG_HASH_ADD(&hash_src, ip->ip_src);
941: LAGG_HASH_ADD(&hash_dst, ip->ip_dst);
942: LAGG_HASH_ADD(&hash, ip->ip_p);
1.1 yamaguch 943: }
944: off += ip->ip_hl << 2;
945: proto = ip->ip_p;
946: break;
947: case ETHERTYPE_IPV6:
948: ip6 = lagg_m_extract(m, off, sizeof(*ip6), &buf);
1.38 yamaguch 949: if (ip6 == NULL)
1.17 yamaguch 950: goto out;
1.1 yamaguch 951:
952: if (sc->sc_hash_ip6addr) {
1.17 yamaguch 953: LAGG_HASH_ADD(&hash_src, ip6->ip6_src);
954: LAGG_HASH_ADD(&hash_dst, ip6->ip6_dst);
1.1 yamaguch 955: flowlabel = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
1.17 yamaguch 956: LAGG_HASH_ADD(&hash, flowlabel);
1.1 yamaguch 957: }
958: proto = ip6->ip6_nxt;
959: off += sizeof(*ip6);
1.17 yamaguch 960: break;
1.1 yamaguch 961:
962: default:
963: return hash;
964: }
965:
966: switch (proto) {
967: case IPPROTO_TCP:
968: th = lagg_m_extract(m, off, sizeof(*th), &buf);
1.38 yamaguch 969: if (th == NULL)
1.17 yamaguch 970: goto out;
1.1 yamaguch 971:
972: if (sc->sc_hash_tcp) {
1.17 yamaguch 973: LAGG_HASH_ADD(&hash_src, th->th_sport);
974: LAGG_HASH_ADD(&hash_dst, th->th_dport);
1.1 yamaguch 975: }
976: break;
977: case IPPROTO_UDP:
978: uh = lagg_m_extract(m, off, sizeof(*uh), &buf);
1.38 yamaguch 979: if (uh == NULL)
1.17 yamaguch 980: goto out;
1.1 yamaguch 981:
982: if (sc->sc_hash_udp) {
1.17 yamaguch 983: LAGG_HASH_ADD(&hash_src, uh->uh_sport);
984: LAGG_HASH_ADD(&hash_dst, uh->uh_dport);
1.1 yamaguch 985: }
986: break;
987: }
988:
1.17 yamaguch 989: out:
990: hash_src ^= hash_dst;
991: LAGG_HASH_ADD(&hash, hash_src);
992: #undef LAGG_HASH_ADD
993:
1.1 yamaguch 994: return hash;
995: }
996:
/*
 * Common transmit path for if_transmit and if_start: take a psref on
 * the current protocol variant and hand the packet to its transmit
 * method.  The mbuf is always consumed, whether by the protocol or by
 * m_freem() here on error.
 */
static int
lagg_tx_common(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_variant *var;
	lagg_proto pr;
	struct psref psref;
	int error;

	var = lagg_variant_getref(ifp->if_softc, &psref);

	/* no active variant (e.g. during protocol switch): drop */
	if (__predict_false(var == NULL)) {
		m_freem(m);
		if_statinc(ifp, if_oerrors);
		return ENOENT;
	}

	pr = var->lv_proto;
	if (__predict_true(lagg_protos[pr].pr_transmit != NULL)) {
		error = lagg_protos[pr].pr_transmit(var->lv_psc, m);
		/* mbuf is already freed */
	} else {
		m_freem(m);
		if_statinc(ifp, if_oerrors);
		error = ENOBUFS;
	}

	lagg_variant_putref(var, &psref);

	return error;
}
1027:
/* if_transmit handler: defer to the common transmit path. */
static int
lagg_transmit(struct ifnet *ifp, struct mbuf *m)
{

	return lagg_tx_common(ifp, m);
}
1034:
1035: static void
1036: lagg_start(struct ifnet *ifp)
1037: {
1038: struct mbuf *m;
1039:
1040: for (;;) {
1041: IFQ_DEQUEUE(&ifp->if_snd, m);
1042: if (m == NULL)
1043: break;
1044:
1045: (void)lagg_tx_common(ifp, m);
1046: }
1047: }
1048:
/*
 * Send a packet out through a specific member port and account the
 * output statistics on the lagg interface.  Packet length and flags
 * are captured before lagg_port_xmit() because it consumes the mbuf
 * in all cases.
 */
void
lagg_enqueue(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp;
	int len, error;
	short mflags;

	ifp = &sc->sc_if;
	len = m->m_pkthdr.len;
	mflags = m->m_flags;

	error = lagg_port_xmit(lp, m);
	if (error) {
		/* mbuf is already freed */
		if_statinc(ifp, if_oerrors);
	}

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statinc_ref(nsr, if_opackets);
	if_statadd_ref(nsr, if_obytes, len);
	if (mflags & M_MCAST)
		if_statinc_ref(nsr, if_omcasts);
	IF_STAT_PUTREF(ifp);
}
1073:
/*
 * Hand a received packet to the current protocol's input method under
 * a psref on the variant.  Returns the mbuf for further input
 * processing, or NULL when the packet was consumed (or dropped when
 * no variant / no input method exists).
 */
static struct mbuf *
lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct psref psref;
	struct lagg_variant *var;
	lagg_proto pr;

	var = lagg_variant_getref(sc, &psref);

	if (var == NULL) {
		/* count drops that happen while no protocol is attached */
		sc->sc_novar.ev_count++;
		m_freem(m);
		return NULL;
	}

	pr = var->lv_proto;

	if (lagg_protos[pr].pr_input != NULL) {
		m = lagg_protos[pr].pr_input(var->lv_psc, lp, m);
	} else {
		m_freem(m);
		m = NULL;
	}

	lagg_variant_putref(var, &psref);

	return m;
}
1102:
/*
 * Hook called from the Ethernet input path of a member interface
 * (via lagg_input_ethernet_p).  Claims the packet for the lagg
 * interface: filters out promiscuously-received unicast not addressed
 * to us, runs pfil hooks, passes the packet to the protocol, and
 * re-injects survivors into if_input() of the lagg interface.
 * Returns the mbuf unchanged if the interface is not a lagg member,
 * NULL when the packet was consumed here.
 */
static struct mbuf *
lagg_input_ethernet(struct ifnet *ifp_port, struct mbuf *m)
{
	struct ifnet *ifp;
	struct psref psref;
	struct lagg_port *lp;
	int s;

	/* sanity check */
	s = pserialize_read_enter();
	lp = atomic_load_consume(&ifp_port->if_lagg);
	if (lp == NULL) {
		/* This interface is not a member of lagg */
		pserialize_read_exit(s);
		return m;
	}
	/* pin the port with a psref before leaving the read section */
	lagg_port_getref(lp, &psref);
	pserialize_read_exit(s);

	ifp = &lp->lp_softc->sc_if;

	/*
	 * Drop promiscuously received packets
	 * if we are not in promiscuous mode.
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) == 0 &&
	    (ifp_port->if_flags & IFF_PROMISC) != 0 &&
	    (ifp->if_flags & IFF_PROMISC) == 0) {
		struct ether_header *eh;

		eh = mtod(m, struct ether_header *);
		if (memcmp(CLLADDR(ifp->if_sadl),
		    eh->ether_dhost, ETHER_ADDR_LEN) != 0) {
			m_freem(m);
			m = NULL;
			if_statinc(ifp, if_ierrors);
			goto out;
		}
	}

	if (pfil_run_hooks(ifp_port->if_pfil, &m,
	    ifp_port, PFIL_IN) != 0)
		goto out;

	m = lagg_proto_input(lp->lp_softc, lp, m);
	if (m != NULL) {
		/* deliver as if received on the lagg interface itself */
		m_set_rcvif(m, ifp);
		m->m_flags &= ~M_PROMISC;
		if_input(ifp, m);
		m = NULL;
	}

out:
	lagg_port_putref(lp, &psref);

	return m;
}
1160:
1161: static int
1162: lagg_media_change(struct ifnet *ifp)
1163: {
1164:
1165: if (ISSET(ifp->if_flags, IFF_DEBUG))
1166: printf("%s: ignore media change\n", ifp->if_xname);
1167:
1168: return 0;
1169: }
1170:
1171: static void
1172: lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1173: {
1174: struct lagg_softc *sc;
1175: struct lagg_port *lp;
1176:
1177: sc = ifp->if_softc;
1178:
1179: imr->ifm_status = IFM_AVALID;
1180: imr->ifm_active = IFM_ETHER | IFM_AUTO;
1181:
1182: LAGG_LOCK(sc);
1183: LAGG_PORTS_FOREACH(sc, lp) {
1184: if (lagg_portactive(lp))
1185: imr->ifm_status |= IFM_ACTIVE;
1186: }
1187: LAGG_UNLOCK(sc);
1188: }
1189:
1190: static int
1191: lagg_port_vlan_cb(struct lagg_port *lp,
1192: struct lagg_vlantag *lvt, bool set)
1193: {
1194: struct ifnet *ifp_port;
1195: int error;
1196:
1.23 yamaguch 1197: if (lp->lp_iftype != IFT_ETHER)
1.1 yamaguch 1198: return 0;
1199:
1200: error = 0;
1201: ifp_port = lp->lp_ifp;
1202:
1203: if (set) {
1.26 yamaguch 1204: error = ether_add_vlantag(ifp_port,
1205: lvt->lvt_vtag, NULL);
1.1 yamaguch 1206: } else {
1.26 yamaguch 1207: error = ether_del_vlantag(ifp_port,
1208: lvt->lvt_vtag);
1.1 yamaguch 1209: }
1210:
1211: return error;
1212: }
1213:
1214: static int
1215: lagg_vlan_cb(struct ethercom *ec, uint16_t vtag, bool set)
1216: {
1217: struct ifnet *ifp;
1218: struct lagg_softc *sc;
1219: struct lagg_vlantag *lvt, *lvt0;
1220: struct lagg_port *lp;
1221: int error;
1222:
1223: ifp = (struct ifnet *)ec;
1224: sc = ifp->if_softc;
1225:
1226: if (set) {
1227: lvt = kmem_zalloc(sizeof(*lvt), KM_SLEEP);
1228: lvt->lvt_vtag = vtag;
1229: TAILQ_INSERT_TAIL(&sc->sc_vtags, lvt, lvt_entry);
1230: } else {
1231: TAILQ_FOREACH_SAFE(lvt, &sc->sc_vtags, lvt_entry, lvt0) {
1232: if (lvt->lvt_vtag == vtag) {
1233: TAILQ_REMOVE(&sc->sc_vtags, lvt, lvt_entry);
1234: break;
1235: }
1236: }
1237:
1238: if (lvt == NULL)
1239: return ENOENT;
1240: }
1241:
1242: KASSERT(lvt != NULL);
1243: LAGG_PORTS_FOREACH(sc, lp) {
1244: error = lagg_port_vlan_cb(lp, lvt, set);
1245: if (error != 0) {
1246: lagg_log(sc, LOG_WARNING,
1247: "%s failed to configure vlan on %d\n",
1248: lp->lp_ifp->if_xname, error);
1249: }
1250: }
1251:
1252: return 0;
1253: }
1254:
1255: static struct lagg_softc *
1256: lagg_softc_alloc(enum lagg_iftypes ift)
1257: {
1258: struct lagg_softc *sc;
1259: size_t s;
1260:
1261: s = lagg_sizeof_softc(ift);
1262: KASSERT(s > 0);
1263:
1264: sc = kmem_zalloc(s, KM_SLEEP);
1265: KASSERT(sc != NULL);
1266:
1267: return sc;
1268: }
1269:
1270: static void
1271: lagg_softc_free(struct lagg_softc *sc)
1272: {
1273:
1274: kmem_free(sc,
1275: lagg_sizeof_softc(sc->sc_iftype));
1276: }
1277:
/*
 * Publish "newvar" as the current protocol variant.  The release
 * store pairs with atomic_load_consume() in lagg_variant_getref();
 * pserialize_perform() waits out readers still inside the read
 * section, and psref_target_destroy() waits for readers that already
 * took a reference on the old variant.
 */
static void
lagg_variant_update(struct lagg_softc *sc, struct lagg_variant *newvar)
{
	struct lagg_variant *oldvar;

	KASSERT(LAGG_LOCKED(sc));

	psref_target_init(&newvar->lv_psref, lagg_psref_class);

	oldvar = sc->sc_var;
	atomic_store_release(&sc->sc_var, newvar);
	pserialize_perform(sc->sc_psz);

	/* oldvar is NULL only for the very first protocol attach */
	if (__predict_true(oldvar != NULL))
		psref_target_destroy(&oldvar->lv_psref, lagg_psref_class);
}
1294:
/*
 * Look up the current protocol variant and acquire a passive
 * reference on it.  Returns NULL if no protocol is attached.
 * Release with lagg_variant_putref().
 */
static struct lagg_variant *
lagg_variant_getref(struct lagg_softc *sc, struct psref *psref)
{
	struct lagg_variant *var;
	int s;

	s = pserialize_read_enter();
	var = atomic_load_consume(&sc->sc_var);
	if (var == NULL) {
		pserialize_read_exit(s);
		return NULL;
	}

	/* acquire while still in the read section so var cannot vanish */
	psref_acquire(psref, &var->lv_psref, lagg_psref_class);
	pserialize_read_exit(s);

	return var;
}
1313:
1314: static void
1315: lagg_variant_putref(struct lagg_variant *var, struct psref *psref)
1316: {
1317:
1318: if (__predict_false(var == NULL))
1319: return;
1320: psref_release(psref, &var->lv_psref, lagg_psref_class);
1321: }
1322:
/*
 * Attach protocol "pr" to the lagg, returning its private state in
 * *psc.  Every protocol is required to provide pr_attach.
 */
static int
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr,
    struct lagg_proto_softc **psc)
{

	KASSERT(lagg_protos[pr].pr_attach != NULL);
	return lagg_protos[pr].pr_attach(sc, psc);
}
1331:
1332: static void
1333: lagg_proto_detach(struct lagg_variant *oldvar)
1334: {
1335: lagg_proto pr;
1336:
1337: pr = oldvar->lv_proto;
1338:
1339: if (lagg_protos[pr].pr_detach == NULL)
1340: return;
1341:
1342: lagg_protos[pr].pr_detach(oldvar->lv_psc);
1343: }
1344:
/*
 * Invoke the attached protocol's up or down handler.  The LWP is
 * bound to its CPU for the duration, as required by psref(9).
 * Returns ENXIO when no protocol is attached; pr_down cannot fail.
 */
static int
lagg_proto_updown(struct lagg_softc *sc, bool is_up)
{
	struct lagg_variant *var;
	struct psref psref;
	lagg_proto pr;
	int error, bound;

	error = 0;
	bound = curlwp_bind();

	var = lagg_variant_getref(sc, &psref);
	if (var == NULL) {
		curlwp_bindx(bound);
		return ENXIO;
	}

	pr = var->lv_proto;

	if (is_up && lagg_protos[pr].pr_up != NULL) {
		error = lagg_protos[pr].pr_up(var->lv_psc);
	} else if (!is_up && lagg_protos[pr].pr_down != NULL) {
		lagg_protos[pr].pr_down(var->lv_psc);
	}

	lagg_variant_putref(var, &psref);
	curlwp_bindx(bound);

	return error;
}
1375:
/* Notify the attached protocol that the lagg interface has come up. */
static int
lagg_proto_up(struct lagg_softc *sc)
{

	return lagg_proto_updown(sc, true);
}
1382:
/* Notify the attached protocol that the lagg interface is going down. */
static void
lagg_proto_down(struct lagg_softc *sc)
{

	(void)lagg_proto_updown(sc, false);
}
1389:
/*
 * Dispatch a per-port operation (alloc/free/start/stop) to the
 * attached protocol.  Returns ENXIO when no protocol is attached and
 * EPROTONOSUPPORT when the protocol lacks the requested operation.
 */
static int
lagg_proto_portctrl(struct lagg_softc *sc, struct lagg_port *lp,
    enum lagg_portctrl ctrl)
{
	struct lagg_variant *var;
	struct psref psref;
	lagg_proto pr;
	int error, bound;

	error = 0;
	/* stay on this CPU while holding the psref */
	bound = curlwp_bind();

	var = lagg_variant_getref(sc, &psref);
	if (var == NULL) {
		curlwp_bindx(bound);
		return ENXIO;
	}

	pr = var->lv_proto;

	switch (ctrl) {
	case LAGG_PORTCTRL_ALLOC:
		if (lagg_protos[pr].pr_allocport == NULL) {
			goto nosupport;
		}
		error = lagg_protos[pr].pr_allocport(var->lv_psc, lp);
		break;
	case LAGG_PORTCTRL_FREE:
		if (lagg_protos[pr].pr_freeport == NULL) {
			goto nosupport;
		}
		lagg_protos[pr].pr_freeport(var->lv_psc, lp);
		break;
	case LAGG_PORTCTRL_START:
		if (lagg_protos[pr].pr_startport == NULL) {
			goto nosupport;
		}
		lagg_protos[pr].pr_startport(var->lv_psc, lp);
		break;
	case LAGG_PORTCTRL_STOP:
		if (lagg_protos[pr].pr_stopport == NULL) {
			goto nosupport;
		}
		lagg_protos[pr].pr_stopport(var->lv_psc, lp);
		break;
	default:
		goto nosupport;
	}

	lagg_variant_putref(var, &psref);
	curlwp_bindx(bound);
	return error;

nosupport:
	lagg_variant_putref(var, &psref);
	curlwp_bindx(bound);
	return EPROTONOSUPPORT;
}
1448:
/* Ask the attached protocol to allocate its per-port state for lp. */
static int
lagg_proto_allocport(struct lagg_softc *sc, struct lagg_port *lp)
{

	return lagg_proto_portctrl(sc, lp, LAGG_PORTCTRL_ALLOC);
}
1455:
/* Release the protocol's per-port state for lp; errors are ignored. */
static void
lagg_proto_freeport(struct lagg_softc *sc, struct lagg_port *lp)
{

	lagg_proto_portctrl(sc, lp, LAGG_PORTCTRL_FREE);
}
1462:
/* Start protocol processing on port lp; errors are ignored. */
static void
lagg_proto_startport(struct lagg_softc *sc, struct lagg_port *lp)
{

	lagg_proto_portctrl(sc, lp, LAGG_PORTCTRL_START);
}
1469:
/* Stop protocol processing on port lp; errors are ignored. */
static void
lagg_proto_stopport(struct lagg_softc *sc, struct lagg_port *lp)
{

	lagg_proto_portctrl(sc, lp, LAGG_PORTCTRL_STOP);
}
1476:
/*
 * Report a member port's link-state change to the attached protocol.
 * Called with the port's ifnet lock held.
 */
static void
lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_variant *var;
	struct psref psref;
	lagg_proto pr;
	int bound;

	KASSERT(IFNET_LOCKED(lp->lp_ifp));

	/* stay on this CPU while holding the psref */
	bound = curlwp_bind();
	var = lagg_variant_getref(sc, &psref);

	if (var == NULL) {
		curlwp_bindx(bound);
		return;
	}

	pr = var->lv_proto;

	if (lagg_protos[pr].pr_linkstate)
		lagg_protos[pr].pr_linkstate(var->lv_psc, lp);

	lagg_variant_putref(var, &psref);
	curlwp_bindx(bound);
}
1503:
/* Fill *resp with protocol-level statistics, if the protocol has any. */
static void
lagg_proto_stat(struct lagg_variant *var, struct laggreqproto *resp)
{
	lagg_proto pr;

	pr = var->lv_proto;

	if (lagg_protos[pr].pr_protostat != NULL)
		lagg_protos[pr].pr_protostat(var->lv_psc, resp);
}
1514:
/* Fill *resp with per-port statistics, if the protocol provides them. */
static void
lagg_proto_portstat(struct lagg_variant *var, struct lagg_port *lp,
    struct laggreqport *resp)
{
	lagg_proto pr;

	pr = var->lv_proto;

	if (lagg_protos[pr].pr_portstat != NULL)
		lagg_protos[pr].pr_portstat(var->lv_psc, lp, resp);
}
1526:
/*
 * Forward a protocol-specific ioctl to the attached protocol.
 * Returns ENXIO when no protocol is attached, EBUSY when the request
 * targets a protocol other than the active one, and ENOTTY when the
 * protocol has no ioctl handler.
 */
static int
lagg_proto_ioctl(struct lagg_softc *sc, struct lagg_req *lreq)
{
	struct lagg_variant *var;
	struct psref psref;
	lagg_proto pr;
	int bound, error;

	error = ENOTTY;
	bound = curlwp_bind();
	var = lagg_variant_getref(sc, &psref);

	if (var == NULL) {
		error = ENXIO;
		goto done;
	}

	pr = var->lv_proto;
	if (pr != lreq->lrq_proto) {
		error = EBUSY;
		goto done;
	}

	if (lagg_protos[pr].pr_ioctl != NULL) {
		error = lagg_protos[pr].pr_ioctl(var->lv_psc,
		    &lreq->lrq_reqproto);
	}

done:
	if (var != NULL)
		lagg_variant_putref(var, &psref);
	curlwp_bindx(bound);
	return error;
}
1561:
/*
 * Switch the lagg to protocol "pr".  A new variant is allocated
 * outside the lock, the protocol is attached, the variant published
 * (waiting out readers of the old one), and the previous protocol is
 * detached and its variant freed.  No-op if "pr" is already active.
 */
static int
lagg_pr_attach(struct lagg_softc *sc, lagg_proto pr)
{
	struct lagg_variant *newvar, *oldvar;
	struct lagg_proto_softc *psc;
	bool cleanup_oldvar;
	int error;

	error = 0;
	cleanup_oldvar = false;
	newvar = kmem_alloc(sizeof(*newvar), KM_SLEEP);

	LAGG_LOCK(sc);
	oldvar = sc->sc_var;

	/* already running the requested protocol */
	if (oldvar != NULL && oldvar->lv_proto == pr) {
		error = 0;
		goto done;
	}

	error = lagg_proto_attach(sc, pr, &psc);
	if (error != 0)
		goto done;

	newvar->lv_proto = pr;
	newvar->lv_psc = psc;

	lagg_variant_update(sc, newvar);
	/* ownership transferred to sc_var; don't free below */
	newvar = NULL;

	if (oldvar != NULL) {
		lagg_proto_detach(oldvar);
		cleanup_oldvar = true;
	}
done:
	LAGG_UNLOCK(sc);

	if (newvar != NULL)
		kmem_free(newvar, sizeof(*newvar));
	if (cleanup_oldvar)
		kmem_free(oldvar, sizeof(*oldvar));

	return error;
}
1606:
/*
 * Detach the active protocol, if any.  sc_var is cleared and readers
 * are waited out (pserialize) before the protocol state is torn down
 * and the variant freed outside the lock.
 */
static void
lagg_pr_detach(struct lagg_softc *sc)
{
	struct lagg_variant *var;

	LAGG_LOCK(sc);

	var = sc->sc_var;
	atomic_store_release(&sc->sc_var, NULL);
	pserialize_perform(sc->sc_psz);

	if (var != NULL)
		lagg_proto_detach(var);

	LAGG_UNLOCK(sc);

	if (var != NULL)
		kmem_free(var, sizeof(*var));
}
1626:
/*
 * SIOCADDMULTI handler for the lagg: register the multicast address
 * with the lagg's ethercom and forward the request to every member
 * port.  The address is recorded on sc_mclist so that it can be
 * replayed onto later-joining ports (lagg_port_syncmulti) and removed
 * again on port detach.
 */
static int
lagg_ether_addmulti(struct lagg_softc *sc, struct ifreq *ifr)
{
	struct lagg_port *lp;
	struct lagg_mc_entry *mc;
	struct ethercom *ec;
	const struct sockaddr *sa;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	if (sc->sc_if.if_type != IFT_ETHER)
		return EPROTONOSUPPORT;

	ec = (struct ethercom *)&sc->sc_if;
	sa = ifreq_getaddr(SIOCADDMULTI, ifr);

	/* ENETRESET indicates a newly added address; anything else is final */
	error = ether_addmulti(sa, ec);
	if (error != ENETRESET)
		return error;

	error = ether_multiaddr(sa, addrlo, addrhi);
	KASSERT(error == 0);

	mc = kmem_zalloc(sizeof(*mc), KM_SLEEP);

	ETHER_LOCK(ec);
	mc->mc_enm = ether_lookup_multi(addrlo, addrhi, ec);
	ETHER_UNLOCK(ec);

	/* just added above, so the lookup cannot fail */
	KASSERT(mc->mc_enm != NULL);

	LAGG_LOCK(sc);
	LAGG_PORTS_FOREACH(sc, lp) {
		/* best effort: per-port failures are deliberately ignored */
		(void)lagg_lp_ioctl(lp, SIOCADDMULTI, (void *)ifr);
	}
	LAGG_UNLOCK(sc);

	KASSERT(sa->sa_len <= sizeof(mc->mc_addr));
	memcpy(&mc->mc_addr, sa, sa->sa_len);
	LIST_INSERT_HEAD(&sc->sc_mclist, mc, mc_entry);

	return 0;
}
1670:
/*
 * SIOCDELMULTI handler for the lagg: drop the multicast address from
 * the lagg's ethercom, forward the removal to every member port, and
 * delete the matching sc_mclist record.
 */
static int
lagg_ether_delmulti(struct lagg_softc *sc, struct ifreq *ifr)
{
	struct lagg_port *lp;
	struct lagg_mc_entry *mc;
	const struct sockaddr *sa;
	struct ethercom *ec;
	struct ether_multi *enm;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	ec = (struct ethercom *)&sc->sc_if;
	sa = ifreq_getaddr(SIOCDELMULTI, ifr);
	error = ether_multiaddr(sa, addrlo, addrhi);
	if (error != 0)
		return error;

	ETHER_LOCK(ec);
	enm = ether_lookup_multi(addrlo, addrhi, ec);
	ETHER_UNLOCK(ec);

	if (enm == NULL)
		return ENOENT;

	/* find the bookkeeping entry recorded by lagg_ether_addmulti() */
	LIST_FOREACH(mc, &sc->sc_mclist, mc_entry) {
		if (mc->mc_enm == enm)
			break;
	}

	if (mc == NULL)
		return ENOENT;

	/* ENETRESET indicates the last reference went away */
	error = ether_delmulti(sa, ec);
	if (error != ENETRESET)
		return error;

	LAGG_LOCK(sc);
	LAGG_PORTS_FOREACH(sc, lp) {
		/* best effort: per-port failures are deliberately ignored */
		(void)lagg_lp_ioctl(lp, SIOCDELMULTI, (void *)ifr);
	}
	LAGG_UNLOCK(sc);

	LIST_REMOVE(mc, mc_entry);
	kmem_free(mc, sizeof(*mc));

	return 0;
}
1718:
1719: static void
1720: lagg_port_multi(struct lagg_softc *sc, struct lagg_port *lp,
1721: u_long cmd)
1722: {
1723: struct lagg_mc_entry *mc;
1724: struct ifreq ifr;
1725: struct ifnet *ifp_port;
1726: const struct sockaddr *sa;
1727:
1728: ifp_port = lp->lp_ifp;
1729:
1730: memset(&ifr, 0, sizeof(ifr));
1731: strlcpy(ifr.ifr_name, ifp_port->if_xname, sizeof(ifr.ifr_name));
1732:
1733: LIST_FOREACH(mc, &sc->sc_mclist, mc_entry) {
1734: sa = (struct sockaddr *)&mc->mc_addr;
1735: KASSERT(sizeof(ifr.ifr_space) >= sa->sa_len);
1736: memcpy(&ifr.ifr_addr, sa, sa->sa_len);
1737: (void)lagg_lp_ioctl(lp, cmd, (void *)&ifr);
1738: }
1739:
1740: }
1741:
/* Register all of the lagg's multicast addresses on a joining port. */
static void
lagg_port_syncmulti(struct lagg_softc *sc, struct lagg_port *lp)
{

	lagg_port_multi(sc, lp, SIOCADDMULTI);
}
1748:
/* Remove all of the lagg's multicast addresses from a leaving port. */
static void
lagg_port_purgemulti(struct lagg_softc *sc, struct lagg_port *lp)
{

	lagg_port_multi(sc, lp, SIOCDELMULTI);
}
1755:
/*
 * Apply (set=true) or remove (set=false) every VLAN tag registered on
 * the lagg to/from the given member port.  Failures are logged but
 * do not stop the loop.
 */
static void
lagg_port_vlan(struct lagg_softc *sc, struct lagg_port *lp,
    bool set)
{
	struct lagg_vlantag *lvt;
	int error;

	TAILQ_FOREACH(lvt, &sc->sc_vtags, lvt_entry) {
		error = lagg_port_vlan_cb(lp, lvt, set);
		if (error != 0) {
			lagg_log(sc, LOG_WARNING,
			    "%s failed to configure vlan on %d\n",
			    lp->lp_ifp->if_xname, error);
		}
	}
}
1772:
/* Register all of the lagg's VLAN tags on a joining port. */
static void
lagg_port_syncvlan(struct lagg_softc *sc, struct lagg_port *lp)
{

	lagg_port_vlan(sc, lp, true);
}
1779:
/* Remove all of the lagg's VLAN tags from a leaving port. */
static void
lagg_port_purgevlan(struct lagg_softc *sc, struct lagg_port *lp)
{

	lagg_port_vlan(sc, lp, false);
}
1786:
1787: static int
1.12 yamaguch 1788: lagg_setifcaps(struct lagg_port *lp, uint64_t cap)
1789: {
1790: struct ifcapreq ifcr;
1791: int error;
1792:
1793: if (lp->lp_ifp->if_capenable == cap)
1794: return 0;
1795:
1796: memset(&ifcr, 0, sizeof(ifcr));
1797: ifcr.ifcr_capenable = cap;
1798:
1799: IFNET_LOCK(lp->lp_ifp);
1800: error = LAGG_PORT_IOCTL(lp, SIOCSIFCAP, &ifcr);
1801: IFNET_UNLOCK(lp->lp_ifp);
1802:
1803: return error;
1804: }
1805:
1.22 yamaguch 1806: static void
1807: lagg_sync_ifcaps(struct lagg_softc *sc)
1808: {
1809: struct lagg_port *lp;
1810: struct ifnet *ifp;
1811: int error = 0;
1812:
1813: ifp = (struct ifnet *)&sc->sc_if;
1814:
1815: LAGG_LOCK(sc);
1816: LAGG_PORTS_FOREACH(sc, lp) {
1817: error = lagg_setifcaps(lp, ifp->if_capenable);
1818:
1819: if (error != 0) {
1820: lagg_log(sc, LOG_WARNING,
1821: "failed to update capabilities "
1822: "of %s, error=%d",
1823: lp->lp_ifp->if_xname, error);
1824: }
1825: }
1826: LAGG_UNLOCK(sc);
1827: }
1828:
1.12 yamaguch 1829: static int
1830: lagg_setethcaps(struct lagg_port *lp, int cap)
1831: {
1832: struct ethercom *ec;
1833: struct eccapreq eccr;
1834: int error;
1835:
1836: KASSERT(lp->lp_iftype == IFT_ETHER);
1837: ec = (struct ethercom *)lp->lp_ifp;
1838:
1839: if (ec->ec_capenable == cap)
1840: return 0;
1841:
1842: memset(&eccr, 0, sizeof(eccr));
1843: eccr.eccr_capenable = cap;
1844:
1845: IFNET_LOCK(lp->lp_ifp);
1846: error = LAGG_PORT_IOCTL(lp, SIOCSETHERCAP, &eccr);
1847: IFNET_UNLOCK(lp->lp_ifp);
1848:
1849: return error;
1850: }
1851:
1852: static void
1.22 yamaguch 1853: lagg_sync_ethcaps(struct lagg_softc *sc)
1854: {
1855: struct ethercom *ec;
1856: struct lagg_port *lp;
1857: int error;
1858:
1859: ec = (struct ethercom *)&sc->sc_if;
1860:
1861: LAGG_LOCK(sc);
1862: LAGG_PORTS_FOREACH(sc, lp) {
1863: if (lp->lp_iftype != IFT_ETHER)
1864: continue;
1865:
1866: error = lagg_setethcaps(lp, ec->ec_capenable);
1867: if (error != 0) {
1868: lagg_log(sc, LOG_WARNING,
1869: "failed to update ether "
1870: "capabilities"" of %s, error=%d",
1871: lp->lp_ifp->if_xname, error);
1872: }
1873:
1874: }
1875: LAGG_UNLOCK(sc);
1876: }
1877:
/*
 * Recompute the lagg's interface capabilities as the intersection of
 * all member ports' capabilities, then push the common enabled set
 * back to the ports.  Several iterations may be needed because
 * enabling one capability can implicitly toggle another on a port.
 */
static void
lagg_ifcap_update(struct lagg_softc *sc)
{
	struct ifnet *ifp;
	struct lagg_port *lp;
	uint64_t cap, ena, pena;
	size_t i;

	KASSERT(LAGG_LOCKED(sc));

	/* Get common capabilities for the lagg ports */
	ena = ~(uint64_t)0;
	cap = ~(uint64_t)0;
	LAGG_PORTS_FOREACH(sc, lp) {
		ena &= lp->lp_ifp->if_capenable;
		cap &= lp->lp_ifp->if_capabilities;
	}

	/* all-ones means the port list was empty: report no capabilities */
	if (ena == ~(uint64_t)0)
		ena = 0;
	if (cap == ~(uint64_t)0)
		cap = 0;

	/*
	 * Apply common enabled capabilities back to the lagg ports.
	 * May require several iterations if they are dependent.
	 */
	for (i = 0; i < LAGG_SETCAPS_RETRY; i++) {
		pena = ena;
		LAGG_PORTS_FOREACH(sc, lp) {
			lagg_setifcaps(lp, ena);
			ena &= lp->lp_ifp->if_capenable;
		}

		/* converged: every port accepted the same enabled set */
		if (pena == ena)
			break;
	}

	if (pena != ena) {
		lagg_log(sc, LOG_DEBUG, "couldn't set "
		    "capabilities 0x%08"PRIx64, pena);
	}

	ifp = &sc->sc_if;

	if (ifp->if_capabilities != cap ||
	    ifp->if_capenable != ena) {
		ifp->if_capabilities = cap;
		ifp->if_capenable = ena;

		lagg_log(sc, LOG_DEBUG,"capabilities "
		    "0x%08"PRIx64" enabled 0x%08"PRIx64,
		    cap, ena);
	}
}
1933:
/*
 * Recompute the lagg's ethernet capabilities as the intersection of
 * all member ports' ethernet capabilities and push the common enabled
 * set back to the ethernet ports.  L2TP ports contribute only the MTU
 * capabilities; any other port type forces everything off.
 */
static void
lagg_ethercap_update(struct lagg_softc *sc)
{
	struct ethercom *ec;
	struct lagg_port *lp;
	int cap, ena, pena;
	size_t i;

	KASSERT(LAGG_LOCKED(sc));

	if (sc->sc_if.if_type != IFT_ETHER)
		return;

	/* Get common enabled capabilities for the lagg ports */
	ena = ~0;
	cap = ~0;
	LAGG_PORTS_FOREACH(sc, lp) {
		switch (lp->lp_iftype) {
		case IFT_ETHER:
			ec = (struct ethercom *)lp->lp_ifp;
			ena &= ec->ec_capenable;
			cap &= ec->ec_capabilities;
			break;
		case IFT_L2TP:
			/* l2tp(4) can carry large frames but nothing else */
			ena &= (ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU);
			cap &= (ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU);
			break;
		default:
			ena = 0;
			cap = 0;
		}
	}

	/* all-ones means the port list was empty: report no capabilities */
	if (ena == ~0)
		ena = 0;
	if (cap == ~0)
		cap = 0;

	/*
	 * Apply common enabled capabilities back to the lagg ports.
	 * May require several iterations if they are dependent.
	 */
	for (i = 0; i < LAGG_SETCAPS_RETRY; i++) {
		pena = ena;
		LAGG_PORTS_FOREACH(sc, lp) {
			if (lp->lp_iftype != IFT_ETHER)
				continue;

			ec = (struct ethercom *)lp->lp_ifp;
			lagg_setethcaps(lp, ena);
			ena &= ec->ec_capenable;
		}

		/* converged: every port accepted the same enabled set */
		if (pena == ena)
			break;
	}

	if (pena != ena) {
		lagg_log(sc, LOG_DEBUG, "couldn't set "
		    "ether capabilities 0x%08x", pena);
	}

	ec = (struct ethercom *)&sc->sc_if;

	if (ec->ec_capabilities != cap ||
	    ec->ec_capenable != ena) {
		ec->ec_capabilities = cap;
		ec->ec_capenable = ena;

		lagg_log(sc, LOG_DEBUG,
		    "ether capabilities 0x%08x"
		    " enabled 0x%08x", cap, ena);
	}
}
2008:
/* Recompute both interface and ethernet capabilities of the lagg. */
static void
lagg_capabilities_update(struct lagg_softc *sc)
{

	lagg_ifcap_update(sc);
	lagg_ethercap_update(sc);
}
2016:
/*
 * Synchronize MTU between the lagg and a joining port: the first
 * port's MTU becomes the lagg's MTU, every later port is set to the
 * lagg's MTU via SIOCSIFMTU.  Called with the port's ifnet lock held.
 */
static int
lagg_setup_mtu(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct ifnet *ifp_port;
	struct ifreq ifr;
	int error;

	ifp_port = lp->lp_ifp;
	KASSERT(IFNET_LOCKED(ifp_port));

	error = 0;
	memset(&ifr, 0, sizeof(ifr));

	/* the first port to join dictates the lagg's MTU */
	if (SIMPLEQ_EMPTY(&sc->sc_ports)) {
		ifr.ifr_mtu = lp->lp_mtu;
	} else {
		ifr.ifr_mtu = sc->sc_if.if_mtu;
	}

	if (sc->sc_if.if_mtu != (uint64_t)ifr.ifr_mtu)
		sc->sc_if.if_mtu = ifr.ifr_mtu;

	if (lp->lp_mtu != (uint64_t)ifr.ifr_mtu) {
		if (lp->lp_ioctl == NULL) {
			LAGG_DPRINTF(sc, "cannot change MTU for %s\n",
			    ifp_port->if_xname);
			return EINVAL;
		}

		/* change the port's MTU using its saved ioctl handler */
		strlcpy(ifr.ifr_name, ifp_port->if_xname, sizeof(ifr.ifr_name));
		error = lp->lp_ioctl(ifp_port, SIOCSIFMTU, (void *)&ifr);
		if (error != 0) {
			LAGG_DPRINTF(sc, "invalid MTU %d for %s\n",
			    ifr.ifr_mtu, ifp_port->if_xname);
			return error;
		}
	}

	return 0;
}
2057:
/*
 * Restore a leaving port's original MTU (saved in lp_mtu at join
 * time).  Also clears the lagg's MTU when the last port is gone.
 * Called with the port's ifnet lock held.
 */
static void
lagg_teardown_mtu(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct ifnet *ifp_port;
	struct ifreq ifr;
	int error;

	/* without a saved ioctl handler the MTU cannot be restored */
	if (lp->lp_ioctl == NULL)
		return;

	ifp_port = lp->lp_ifp;
	KASSERT(IFNET_LOCKED(ifp_port));

	if (SIMPLEQ_EMPTY(&sc->sc_ports))
		sc->sc_if.if_mtu = 0;

	if (ifp_port->if_mtu != lp->lp_mtu) {
		memset(&ifr, 0, sizeof(ifr));
		strlcpy(ifr.ifr_name, ifp_port->if_xname, sizeof(ifr.ifr_name));
		ifr.ifr_mtu = lp->lp_mtu;
		error = lp->lp_ioctl(ifp_port, SIOCSIFMTU, (void *)&ifr);
		if (error != 0) {
			lagg_log(sc, LOG_WARNING,
			    "failed to reset MTU %d to %s\n",
			    ifr.ifr_mtu, ifp_port->if_xname);
		}
	}
}
2086:
/*
 * Program the lagg's link-level address into a member port.  For an
 * ethernet port the hardware address is changed and, if the port was
 * running, it is re-initialized to pick the address up; ports without
 * an if_init are put into promiscuous mode instead so they still
 * receive frames for the lagg's address.  Other port types just get a
 * freshly allocated sockaddr_dl.
 */
static void
lagg_port_setsadl(struct lagg_port *lp, uint8_t *lladdr,
    bool iftype_changed)
{
	struct ifnet *ifp_port;
	bool lladdr_changed;
	int error;

	ifp_port = lp->lp_ifp;

	KASSERT(LAGG_LOCKED(lp->lp_softc));
	KASSERT(IFNET_LOCKED(ifp_port));

	switch (lp->lp_iftype) {
	case IFT_ETHER:
		lladdr_changed = lagg_lladdr_equal(lladdr,
		    CLLADDR(ifp_port->if_sadl)) ? false : true;

		/* nothing to reprogram */
		if (lladdr_changed == false &&
		    iftype_changed == false) {
			break;
		}

		lagg_chg_sadl(ifp_port,
		    lladdr, ETHER_ADDR_LEN);

		if (ifp_port->if_init != NULL) {
			/* restart the port so it uses the new address */
			error = 0;
			if (ISSET(ifp_port->if_flags, IFF_RUNNING))
				error = if_init(ifp_port);

			if (error != 0) {
				lagg_log(lp->lp_softc, LOG_WARNING,
				    "%s failed to if_init() on %d\n",
				    ifp_port->if_xname, error);
			}
		} else {
			/* no if_init: fall back to promiscuous reception */
			if (lp->lp_promisc == false) {
				ifpromisc_locked(ifp_port, 1);
				lp->lp_promisc = true;
			}
		}
		break;
	default:
		if_alloc_sadl(ifp_port);
		break;
	}
}
2135:
/*
 * Undo lagg_port_setsadl() on a leaving port: restore the port's
 * original interface type and hardware address (saved in lp_lladdr),
 * re-initialize it if running, and drop the promiscuous fallback if
 * it was used.
 */
static void
lagg_port_unsetsadl(struct lagg_port *lp)
{
	struct ifnet *ifp_port;
	int error;

	ifp_port = lp->lp_ifp;

	KASSERT(LAGG_LOCKED(lp->lp_softc));
	KASSERT(IFNET_LOCKED(ifp_port));

	switch (lp->lp_iftype) {
	case IFT_ETHER:
		/* reset if_type before changing ifp->if_sadl */
		ifp_port->if_type = lp->lp_iftype;

		lagg_chg_sadl(ifp_port,
		    lp->lp_lladdr, ETHER_ADDR_LEN);

		if (ifp_port->if_init != NULL) {
			/* restart the port so it uses the restored address */
			error = 0;
			if (ISSET(ifp_port->if_flags, IFF_RUNNING))
				error = if_init(ifp_port);

			if (error != 0) {
				lagg_log(lp->lp_softc, LOG_WARNING,
				    "%s failed to if_init() on %d\n",
				    ifp_port->if_xname, error);
			}
		} else {
			/* drop the promiscuous fallback taken at attach */
			if (lp->lp_promisc == true) {
				ifpromisc_locked(ifp_port, 0);
				lp->lp_promisc = false;
			}
		}
		break;

	default:
		/* reset if_type before if_alloc_sadl */
		ifp_port->if_type = lp->lp_iftype;
		if_alloc_sadl(ifp_port);
		break;
	}
}
2180:
/*
 * Adopt a joining ethernet port's hardware address as the lagg's
 * lladdr, but only while the lagg is still using its random default
 * address (i.e. no earlier port has donated one).
 */
static void
lagg_setup_lladdr(struct lagg_softc *sc, struct lagg_port *lp)
{

	KASSERT(LAGG_LOCKED(sc));

	if (lagg_lladdr_equal(sc->sc_lladdr, sc->sc_lladdr_rand))
		lagg_lladdr_cpy(sc->sc_lladdr, lp->lp_lladdr);
}
2190:
/*
 * If the leaving port donated the lagg's lladdr, pick the next
 * remaining ethernet port's address instead, or fall back to the
 * lagg's random default when no ethernet port is left.
 */
static void
lagg_teardown_lladdr(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_port *lp0;
	uint8_t *lladdr_next;

	KASSERT(LAGG_LOCKED(sc));

	/* another port's address is in use; nothing to do */
	if (lagg_lladdr_equal(sc->sc_lladdr,
	    lp->lp_lladdr) == false) {
		return;
	}

	lladdr_next = sc->sc_lladdr_rand;

	LAGG_PORTS_FOREACH(sc, lp0) {
		if (lp0->lp_iftype == IFT_ETHER) {
			lladdr_next = lp0->lp_lladdr;
			break;
		}
	}

	lagg_lladdr_cpy(sc->sc_lladdr, lladdr_next);
}
2215:
/*
 * The lagg's sockaddr_dl address was changed (by the administrator);
 * record it in sc_lladdr and program it into every member port.  The
 * lagg must be down.
 */
static void
lagg_lladdr_update(struct lagg_softc *sc)
{
	struct ifnet *ifp;
	struct lagg_port *lp;
	const uint8_t *lladdr;

	ifp = &sc->sc_if;

	KASSERT(LAGG_LOCKED(sc));
	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(!ISSET(ifp->if_flags, IFF_RUNNING));

	lladdr = CLLADDR(ifp->if_sadl);

	/* no change */
	if (lagg_lladdr_equal(sc->sc_lladdr, lladdr))
		return;

	lagg_lladdr_cpy(sc->sc_lladdr, lladdr);

	LAGG_PORTS_FOREACH(sc, lp) {
		IFNET_LOCK(lp->lp_ifp);
		lagg_port_setsadl(lp, sc->sc_lladdr, false);
		IFNET_UNLOCK(lp->lp_ifp);
	}
}
2242:
/*
 * Propagate a change of sc_lladdr (e.g. caused by a port joining or
 * leaving) to the lagg's own sockaddr_dl and to all member ports,
 * then refresh the IPv6 link-local address.  If the current address
 * no longer matches lladdr_prev, the administrator set one manually
 * and it is left alone.
 */
static void
lagg_sadl_update(struct lagg_softc *sc, uint8_t *lladdr_prev)
{
	struct ifnet *ifp;
	struct lagg_port *lp;
	const uint8_t *lladdr;

	ifp = &sc->sc_if;

	KASSERT(LAGG_LOCKED(sc));
	KASSERT(IFNET_LOCKED(ifp));

	lladdr = CLLADDR(ifp->if_sadl);

	/* already up to date */
	if (lagg_lladdr_equal(sc->sc_lladdr, lladdr))
		return;

	/* manually configured address: do not override */
	if (lagg_lladdr_equal(lladdr_prev, lladdr) == false)
		return;

	lagg_chg_sadl(ifp, sc->sc_lladdr, ETHER_ADDR_LEN);

	LAGG_PORTS_FOREACH(sc, lp) {
		IFNET_LOCK(lp->lp_ifp);
		lagg_port_setsadl(lp, sc->sc_lladdr, false);
		IFNET_UNLOCK(lp->lp_ifp);
	}

	/* regenerate the IPv6 link-local address from the new lladdr */
	LAGG_UNLOCK(sc);
	lagg_in6_ifdetach(ifp);
	lagg_in6_ifattach(ifp);
	LAGG_LOCK(sc);
}
2276:
2277: static int
2278: lagg_port_setup(struct lagg_softc *sc,
2279: struct lagg_port *lp, struct ifnet *ifp_port)
2280: {
1.1 yamaguch 2281: u_char if_type;
2282: int error;
1.9 yamaguch 2283: bool stopped, iftype_changed;
1.1 yamaguch 2284:
2285: KASSERT(LAGG_LOCKED(sc));
1.9 yamaguch 2286: IFNET_ASSERT_UNLOCKED(ifp_port);
1.1 yamaguch 2287:
2288: if (&sc->sc_if == ifp_port) {
2289: LAGG_DPRINTF(sc, "cannot add a lagg to itself as a port\n");
2290: return EINVAL;
2291: }
2292:
1.38 yamaguch 2293: if (sc->sc_nports > LAGG_MAX_PORTS)
1.1 yamaguch 2294: return ENOSPC;
2295:
2296: if (ifp_port->if_lagg != NULL) {
2297: lp = (struct lagg_port *)ifp_port->if_lagg;
2298: if (lp->lp_softc == sc)
2299: return EEXIST;
2300: return EBUSY;
2301: }
2302:
2303: switch (ifp_port->if_type) {
2304: case IFT_ETHER:
1.15 yamaguch 2305: case IFT_L2TP:
1.1 yamaguch 2306: if_type = IFT_IEEE8023ADLAG;
2307: break;
2308: default:
2309: return ENOTSUP;
2310: }
2311:
1.9 yamaguch 2312: error = 0;
2313: stopped = false;
2314: lp->lp_softc = sc;
2315: lp->lp_prio = LAGG_PORT_PRIO;
2316: lp->lp_linkstate_hook = if_linkstate_change_establish(ifp_port,
2317: lagg_linkstate_changed, ifp_port);
1.10 yamaguch 2318: lp->lp_ifdetach_hook = ether_ifdetachhook_establish(ifp_port,
2319: lagg_ifdetach, ifp_port);
1.9 yamaguch 2320: psref_target_init(&lp->lp_psref, lagg_port_psref_class);
2321:
2322: IFNET_LOCK(ifp_port);
2323: lp->lp_iftype = ifp_port->if_type;
2324: lp->lp_ioctl = ifp_port->if_ioctl;
2325: lp->lp_output = ifp_port->if_output;
2326: lp->lp_ifcapenable = ifp_port->if_capenable;
2327: lp->lp_mtu = ifp_port->if_mtu;
1.12 yamaguch 2328: if (lp->lp_iftype == IFT_ETHER) {
2329: struct ethercom *ec;
2330: ec = (struct ethercom *)ifp_port;
2331:
1.9 yamaguch 2332: lagg_lladdr_cpy(lp->lp_lladdr, CLLADDR(ifp_port->if_sadl));
1.12 yamaguch 2333: lp->lp_eccapenable = ec->ec_capenable;
2334: }
1.1 yamaguch 2335:
1.9 yamaguch 2336: ifp_port->if_type = if_type;
2337: ifp_port->if_ioctl = lagg_port_ioctl;
1.1 yamaguch 2338:
1.9 yamaguch 2339: iftype_changed = (lp->lp_iftype != ifp_port->if_type);
1.1 yamaguch 2340:
1.15 yamaguch 2341: if (ISSET(ifp_port->if_flags, IFF_RUNNING) &&
2342: ifp_port->if_init != NULL) {
1.27 riastrad 2343: if_stop(ifp_port, 0);
1.1 yamaguch 2344: stopped = true;
2345: }
1.9 yamaguch 2346:
1.1 yamaguch 2347: /* to delete ipv6 link local address */
2348: lagg_in6_ifdetach(ifp_port);
2349:
1.9 yamaguch 2350: error = lagg_setup_mtu(sc, lp);
1.38 yamaguch 2351: if (error != 0)
1.9 yamaguch 2352: goto restore_ipv6lla;
2353:
1.38 yamaguch 2354: if (lp->lp_iftype == IFT_ETHER)
1.9 yamaguch 2355: lagg_setup_lladdr(sc, lp);
1.38 yamaguch 2356:
1.15 yamaguch 2357: lagg_port_setsadl(lp, sc->sc_lladdr, iftype_changed);
1.1 yamaguch 2358:
2359: IFNET_UNLOCK(ifp_port);
2360:
2361: error = lagg_proto_allocport(sc, lp);
2362: if (error != 0)
1.9 yamaguch 2363: goto teardown_lladdr;
1.1 yamaguch 2364:
2365: atomic_store_release(&ifp_port->if_lagg, (void *)lp);
2366: SIMPLEQ_INSERT_TAIL(&sc->sc_ports, lp, lp_entry);
2367: sc->sc_nports++;
2368:
1.25 yamaguch 2369: lagg_port_syncmulti(sc, lp);
2370: lagg_port_syncvlan(sc, lp);
2371:
1.34 yamaguch 2372: IFNET_LOCK(ifp_port);
2373: ifp_port->if_output = lagg_port_output;
1.1 yamaguch 2374: if (stopped) {
1.29 riastrad 2375: if (!ISSET(ifp_port->if_flags, IFF_RUNNING)) {
2376: error = if_init(ifp_port);
2377: if (error != 0)
2378: goto remove_port;
2379: }
1.1 yamaguch 2380: }
1.34 yamaguch 2381: IFNET_UNLOCK(ifp_port);
1.1 yamaguch 2382:
1.13 yamaguch 2383: lagg_config_promisc(sc, lp);
1.1 yamaguch 2384: lagg_proto_startport(sc, lp);
1.12 yamaguch 2385: lagg_capabilities_update(sc);
1.1 yamaguch 2386:
2387: return 0;
2388:
2389: remove_port:
2390: SIMPLEQ_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entry);
2391: sc->sc_nports--;
1.34 yamaguch 2392: IFNET_LOCK(ifp_port);
2393: ifp_port->if_output = lp->lp_output;
2394: IFNET_UNLOCK(ifp_port);
1.1 yamaguch 2395: atomic_store_release(&ifp_port->if_lagg, NULL);
2396: pserialize_perform(sc->sc_psz);
2397: lagg_port_purgemulti(sc, lp);
2398: lagg_port_purgevlan(sc, lp);
1.35 yamaguch 2399: lagg_proto_freeport(sc, lp);
1.1 yamaguch 2400:
1.9 yamaguch 2401: teardown_lladdr:
2402: IFNET_LOCK(ifp_port);
2403: lagg_teardown_mtu(sc, lp);
1.15 yamaguch 2404: lagg_port_unsetsadl(lp);
1.38 yamaguch 2405: if (lp->lp_iftype == IFT_ETHER)
1.9 yamaguch 2406: lagg_teardown_lladdr(sc, lp);
2407: restore_ipv6lla:
2408: KASSERT(IFNET_LOCKED(ifp_port));
2409: lagg_in6_ifdetach(ifp_port);
2410: if (stopped) {
1.28 riastrad 2411: if (if_init(ifp_port) != 0) {
1.9 yamaguch 2412: lagg_log(sc, LOG_WARNING,
2413: "couldn't re-start port %s\n",
2414: ifp_port->if_xname);
2415: }
2416: }
1.1 yamaguch 2417: ifp_port->if_type = lp->lp_iftype;
1.9 yamaguch 2418: if (ifp_port->if_ioctl == lagg_port_ioctl)
2419: ifp_port->if_ioctl = lp->lp_ioctl;
2420:
1.1 yamaguch 2421: IFNET_UNLOCK(ifp_port);
2422:
1.9 yamaguch 2423: psref_target_destroy(&lp->lp_psref, lagg_port_psref_class);
2424: if_linkstate_change_disestablish(ifp_port,
2425: lp->lp_linkstate_hook, NULL);
1.10 yamaguch 2426: ether_ifdetachhook_disestablish(ifp_port,
2427: lp->lp_ifdetach_hook, &sc->sc_lock);
1.1 yamaguch 2428:
2429: return error;
2430: }
2431:
/*
 * Detach a port from its lagg interface and restore the port's own
 * state (if_type, if_ioctl, if_output, MTU, capabilities, lladdr).
 *
 * Called with the softc lock (LAGG_LOCK) held.  When is_ifdetach is
 * true the port interface itself is being detached (lagg_ifdetach()),
 * so the restore steps that reconfigure the disappearing interface
 * are skipped.
 */
static void
lagg_port_teardown(struct lagg_softc *sc, struct lagg_port *lp,
    bool is_ifdetach)
{
	struct ifnet *ifp_port;
	bool stopped;

	KASSERT(LAGG_LOCKED(sc));

	ifp_port = lp->lp_ifp;
	stopped = false;

	ether_ifdetachhook_disestablish(ifp_port,
	    lp->lp_ifdetach_hook, &sc->sc_lock);

	if (ifp_port->if_lagg == NULL) {
		/* already done in lagg_ifdetach() */
		return;
	}

	lagg_proto_stopport(sc, lp);

	/* stop the port so its settings can be changed safely below */
	IFNET_LOCK(ifp_port);
	if (ISSET(ifp_port->if_flags, IFF_RUNNING) &&
	    ifp_port->if_init != NULL) {
		if_stop(ifp_port, 0);
		stopped = true;
	}
	ifp_port->if_output = lp->lp_output;
	IFNET_UNLOCK(ifp_port);

	SIMPLEQ_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entry);
	sc->sc_nports--;
	atomic_store_release(&ifp_port->if_lagg, NULL);
	/* wait for readers that may still see the old if_lagg pointer */
	pserialize_perform(sc->sc_psz);

	if_linkstate_change_disestablish(ifp_port,
	    lp->lp_linkstate_hook, NULL);

	psref_target_destroy(&lp->lp_psref, lagg_port_psref_class);

	lagg_port_purgemulti(sc, lp);
	lagg_port_purgevlan(sc, lp);
	lagg_teardown_lladdr(sc, lp);

	/* give back the handlers and MTU saved at lagg_port_setup() */
	IFNET_LOCK(ifp_port);
	ifp_port->if_type = lp->lp_iftype;
	if (ifp_port->if_ioctl == lagg_port_ioctl)
		ifp_port->if_ioctl = lp->lp_ioctl;
	lagg_teardown_mtu(sc, lp);

	/* restart the port if we stopped it above */
	if (stopped)
		if_init(ifp_port);
	IFNET_UNLOCK(ifp_port);

	if (is_ifdetach == false) {
		/* restore the capabilities saved at lagg_port_setup() */
		lagg_unconfig_promisc(sc, lp);
		lagg_setifcaps(lp, lp->lp_ifcapenable);
		if (lp->lp_iftype == IFT_ETHER)
			lagg_setethcaps(lp, lp->lp_eccapenable);

		IFNET_LOCK(ifp_port);
		lagg_port_unsetsadl(lp);
		/* re-create the IPv6 link-local address removed on join */
		lagg_in6_ifattach(ifp_port);
		IFNET_UNLOCK(ifp_port);
	}

	lagg_capabilities_update(sc);

	lagg_proto_freeport(sc, lp);
	kmem_free(lp, sizeof(*lp));
}
2504:
1.9 yamaguch 2505: static int
2506: lagg_addport(struct lagg_softc *sc, struct ifnet *ifp_port)
1.1 yamaguch 2507: {
1.9 yamaguch 2508: struct lagg_port *lp;
2509: uint8_t lladdr[ETHER_ADDR_LEN];
2510: int error;
2511:
2512: lp = kmem_zalloc(sizeof(*lp), KM_SLEEP);
2513: lp->lp_ifp = ifp_port;
1.1 yamaguch 2514:
2515: LAGG_LOCK(sc);
1.9 yamaguch 2516: lagg_lladdr_cpy(lladdr, sc->sc_lladdr);
2517: error = lagg_port_setup(sc, lp, ifp_port);
2518: if (error == 0)
2519: lagg_sadl_update(sc, lladdr);
1.1 yamaguch 2520: LAGG_UNLOCK(sc);
1.9 yamaguch 2521:
2522: if (error != 0)
2523: kmem_free(lp, sizeof(*lp));
2524:
2525: return error;
1.1 yamaguch 2526: }
2527:
2528: static int
2529: lagg_delport(struct lagg_softc *sc, struct ifnet *ifp_port)
2530: {
2531: struct lagg_port *lp;
1.9 yamaguch 2532: uint8_t lladdr[ETHER_ADDR_LEN];
1.10 yamaguch 2533: int error;
1.1 yamaguch 2534:
2535: KASSERT(IFNET_LOCKED(&sc->sc_if));
2536:
1.10 yamaguch 2537: error = 0;
1.9 yamaguch 2538: LAGG_LOCK(sc);
1.1 yamaguch 2539: lp = ifp_port->if_lagg;
1.9 yamaguch 2540: if (lp == NULL || lp->lp_softc != sc) {
1.10 yamaguch 2541: error = ENOENT;
2542: goto out;
2543: }
2544:
2545: if (lp->lp_ifdetaching) {
2546: error = EBUSY;
2547: goto out;
1.9 yamaguch 2548: }
1.1 yamaguch 2549:
1.9 yamaguch 2550: lagg_lladdr_cpy(lladdr, sc->sc_lladdr);
2551: lagg_port_teardown(sc, lp, false);
2552: lagg_sadl_update(sc, lladdr);
1.10 yamaguch 2553:
2554: out:
1.1 yamaguch 2555: LAGG_UNLOCK(sc);
2556:
1.10 yamaguch 2557: return error;
1.1 yamaguch 2558: }
2559:
1.10 yamaguch 2560: static int
1.9 yamaguch 2561: lagg_delport_all(struct lagg_softc *sc)
2562: {
1.10 yamaguch 2563: struct lagg_port *lp;
1.9 yamaguch 2564: uint8_t lladdr[ETHER_ADDR_LEN];
1.10 yamaguch 2565: int error;
2566:
2567: KASSERT(IFNET_LOCKED(&sc->sc_if));
2568:
2569: error = 0;
1.9 yamaguch 2570:
2571: LAGG_LOCK(sc);
2572: lagg_lladdr_cpy(lladdr, sc->sc_lladdr);
1.10 yamaguch 2573: while ((lp = LAGG_PORTS_FIRST(sc)) != NULL) {
2574: if (lp->lp_ifdetaching) {
2575: error = EBUSY;
2576: continue;
2577: }
2578:
1.9 yamaguch 2579: lagg_port_teardown(sc, lp, false);
2580: }
2581:
2582: lagg_sadl_update(sc, lladdr);
2583: LAGG_UNLOCK(sc);
1.10 yamaguch 2584:
2585: return error;
1.9 yamaguch 2586: }
2587:
1.1 yamaguch 2588: static int
2589: lagg_get_stats(struct lagg_softc *sc, struct lagg_req *resp,
2590: size_t nports)
2591: {
2592: struct lagg_variant *var;
2593: struct lagg_port *lp;
2594: struct laggreqport *port;
2595: struct psref psref;
2596: struct ifnet *ifp;
2597: int bound;
2598: size_t n;
2599:
2600: bound = curlwp_bind();
2601: var = lagg_variant_getref(sc, &psref);
1.33 yamaguch 2602: if (var == NULL) {
2603: curlwp_bindx(bound);
2604: return ENOENT;
2605: }
1.1 yamaguch 2606:
2607: resp->lrq_proto = var->lv_proto;
2608:
2609: lagg_proto_stat(var, &resp->lrq_reqproto);
2610:
2611: n = 0;
2612: LAGG_LOCK(sc);
2613: LAGG_PORTS_FOREACH(sc, lp) {
2614: if (n < nports) {
2615: port = &resp->lrq_reqports[n];
2616:
2617: ifp = lp->lp_ifp;
2618: strlcpy(port->rp_portname, ifp->if_xname,
2619: sizeof(port->rp_portname));
2620:
2621: port->rp_prio = lp->lp_prio;
2622: port->rp_flags = lp->lp_flags;
2623: lagg_proto_portstat(var, lp, port);
2624: }
2625: n++;
2626: }
2627: LAGG_UNLOCK(sc);
2628:
2629: resp->lrq_nports = n;
2630:
2631: lagg_variant_putref(var, &psref);
2632: curlwp_bindx(bound);
2633:
2634: if (resp->lrq_nports > nports) {
1.36 yamaguch 2635: return ENOBUFS;
1.1 yamaguch 2636: }
2637: return 0;
2638: }
2639:
1.40 yamaguch 2640: static void
1.1 yamaguch 2641: lagg_config_promisc(struct lagg_softc *sc, struct lagg_port *lp)
2642: {
1.40 yamaguch 2643: struct ifnet *ifp, *ifp_port;
1.1 yamaguch 2644: int error;
1.40 yamaguch 2645: bool promisc;
2646:
2647: KASSERT(LAGG_LOCKED(sc));
1.1 yamaguch 2648:
2649: ifp = &sc->sc_if;
1.40 yamaguch 2650: ifp_port = lp->lp_ifp;
2651:
2652: if (lp->lp_iftype == IFT_ETHER) {
2653: promisc = ISSET(ifp->if_flags, IFF_PROMISC) ?
2654: true : false;
2655: } else {
2656: promisc = true;
2657: }
2658:
2659: if (lp->lp_promisc == promisc)
2660: return;
1.1 yamaguch 2661:
1.40 yamaguch 2662: error = ifpromisc(ifp_port, promisc ? 1 : 0);
2663: if (error == ENETRESET) {
2664: error = ifp_port->if_init(ifp_port);
1.13 yamaguch 2665: }
1.1 yamaguch 2666:
1.40 yamaguch 2667: if (error == 0) {
2668: lp->lp_promisc = promisc;
2669: } else {
2670: lagg_log(sc, LOG_WARNING,
2671: "couldn't %s promisc on %s\n",
2672: promisc ? "set" : "unset",
2673: ifp_port->if_xname);
2674: }
1.1 yamaguch 2675: }
2676:
1.13 yamaguch 2677: static void
2678: lagg_unconfig_promisc(struct lagg_softc *sc, struct lagg_port *lp)
2679: {
1.40 yamaguch 2680: struct ifnet *ifp_port;
1.13 yamaguch 2681: int error;
2682:
1.40 yamaguch 2683: KASSERT(LAGG_LOCKED(sc));
2684:
2685: ifp_port = lp->lp_ifp;
2686:
2687: if (lp->lp_promisc == false)
2688: return;
2689:
2690: error = ifpromisc(ifp_port, 0);
2691: if (error == ENETRESET) {
2692: error = ifp_port->if_init(ifp_port);
2693: }
2694:
2695: if (error != 0) {
2696: lagg_log(sc, LOG_WARNING,
2697: "couldn't unset promisc on %s\n",
2698: ifp_port->if_xname);
1.13 yamaguch 2699: }
2700: }
2701:
1.1 yamaguch 2702: static int
2703: lagg_port_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2704: {
2705: struct lagg_softc *sc;
2706: struct lagg_port *lp;
2707: int error = 0;
2708: u_int ifflags;
2709:
2710: if ((lp = ifp->if_lagg) == NULL ||
2711: (sc = lp->lp_softc) == NULL) {
2712: goto fallback;
2713: }
2714:
1.30 yamaguch 2715: KASSERT(IFNET_LOCKED(lp->lp_ifp));
2716:
1.1 yamaguch 2717: switch (cmd) {
2718: case SIOCSIFCAP:
2719: case SIOCSIFMTU:
1.12 yamaguch 2720: case SIOCSETHERCAP:
1.1 yamaguch 2721: /* Do not allow the setting to be cahanged once joined */
2722: error = EINVAL;
2723: break;
2724: case SIOCSIFFLAGS:
2725: ifflags = ifp->if_flags;
2726: error = LAGG_PORT_IOCTL(lp, cmd, data);
2727: ifflags ^= ifp->if_flags;
2728:
1.38 yamaguch 2729: if ((ifflags & (IFF_UP | IFF_RUNNING)) != 0)
1.1 yamaguch 2730: lagg_proto_linkstate(sc, lp);
2731: break;
2732: default:
2733: goto fallback;
2734: }
2735:
2736: return error;
2737: fallback:
2738: if (lp != NULL) {
2739: error = LAGG_PORT_IOCTL(lp, cmd, data);
2740: } else {
2741: error = ENOTTY;
2742: }
2743:
2744: return error;
2745: }
2746:
2747: static int
2748: lagg_port_output(struct ifnet *ifp, struct mbuf *m,
2749: const struct sockaddr *dst, const struct rtentry *rt)
2750: {
2751: struct lagg_port *lp = ifp->if_lagg;
2752: int error = 0;
2753:
2754: switch (dst->sa_family) {
2755: case pseudo_AF_HDRCMPLT:
2756: case AF_UNSPEC:
2757: if (lp != NULL)
2758: error = lp->lp_output(ifp, m, dst, rt);
2759: else
2760: error = ENETDOWN;
2761: break;
2762: default:
2763: m_freem(m);
2764: error = ENETDOWN;
2765: }
2766:
2767: return error;
2768: }
2769:
/*
 * Called (via the ether-ifdetach hook) when a member port interface
 * is being detached from the system.  Tears the port out of its lagg.
 *
 * Note the locking dance: if_lagg is first read under pserialize only,
 * then re-checked under LAGG_LOCK, and re-checked once more after
 * IFNET_LOCK(&sc->sc_if) is taken, since the port may be torn down by
 * someone else at each lock boundary.
 */
void
lagg_ifdetach(void *xifp_port)
{
	struct ifnet *ifp_port = xifp_port;
	struct lagg_port *lp;
	struct lagg_softc *sc;
	uint8_t lladdr[ETHER_ADDR_LEN];
	int s;

	IFNET_ASSERT_UNLOCKED(ifp_port);

	s = pserialize_read_enter();
	lp = atomic_load_consume(&ifp_port->if_lagg);
	if (lp == NULL) {
		pserialize_read_exit(s);
		return;
	}

	sc = lp->lp_softc;
	if (sc == NULL) {
		pserialize_read_exit(s);
		return;
	}
	pserialize_read_exit(s);

	LAGG_LOCK(sc);
	/* re-check under the softc lock; the port may be gone by now */
	lp = ifp_port->if_lagg;
	if (lp == NULL) {
		LAGG_UNLOCK(sc);
		return;
	}

	/*
	 * Mark the port as detaching to prevent concurrent
	 * lagg_port_teardown() processing done with IFNET_LOCK() held
	 */
	lp->lp_ifdetaching = true;

	LAGG_UNLOCK(sc);

	/* retake the locks in the correct order: IFNET_LOCK then LAGG_LOCK */
	IFNET_LOCK(&sc->sc_if);
	LAGG_LOCK(sc);
	lp = ifp_port->if_lagg;
	if (lp != NULL) {
		lagg_lladdr_cpy(lladdr, sc->sc_lladdr);
		lagg_port_teardown(sc, lp, true);
		lagg_sadl_update(sc, lladdr);
	}
	LAGG_UNLOCK(sc);
	IFNET_UNLOCK(&sc->sc_if);
}
2821:
2822: void
1.7 yamaguch 2823: lagg_linkstate_changed(void *xifp)
1.1 yamaguch 2824: {
1.7 yamaguch 2825: struct ifnet *ifp = xifp;
1.1 yamaguch 2826: struct lagg_port *lp;
2827: struct psref psref;
2828: int s, bound;
2829:
2830: s = pserialize_read_enter();
2831: lp = atomic_load_consume(&ifp->if_lagg);
2832: if (lp != NULL) {
2833: bound = curlwp_bind();
2834: lagg_port_getref(lp, &psref);
2835: } else {
2836: pserialize_read_exit(s);
2837: return;
2838: }
2839: pserialize_read_exit(s);
2840:
1.30 yamaguch 2841: IFNET_LOCK(lp->lp_ifp);
1.1 yamaguch 2842: lagg_proto_linkstate(lp->lp_softc, lp);
1.30 yamaguch 2843: IFNET_UNLOCK(lp->lp_ifp);
2844:
1.1 yamaguch 2845: lagg_port_putref(lp, &psref);
2846: curlwp_bindx(bound);
2847: }
2848:
/*
 * Acquire a passive reference on a lagg port.  Callers bind to the
 * current CPU (curlwp_bind()) before acquiring, as in
 * lagg_linkstate_changed().
 */
void
lagg_port_getref(struct lagg_port *lp, struct psref *psref)
{

	psref_acquire(psref, &lp->lp_psref, lagg_port_psref_class);
}
2855:
/*
 * Release a passive reference taken with lagg_port_getref().
 */
void
lagg_port_putref(struct lagg_port *lp, struct psref *psref)
{

	psref_release(psref, &lp->lp_psref, lagg_port_psref_class);
}
2862:
2863: void
2864: lagg_log(struct lagg_softc *sc, int lvl, const char *fmt, ...)
2865: {
2866: va_list ap;
2867:
2868: if (lvl == LOG_DEBUG && !lagg_debug_enable(sc))
2869: return;
2870:
2871: log(lvl, "%s: ", sc->sc_if.if_xname);
2872: va_start(ap, fmt);
2873: vlog(lvl, fmt, ap);
2874: va_end(ap);
2875: }
2876:
2877: static void
2878: lagg_workq_work(struct work *wk, void *context)
2879: {
2880: struct lagg_work *lw;
2881:
2882: lw = container_of(wk, struct lagg_work, lw_cookie);
2883:
2884: atomic_cas_uint(&lw->lw_state, LAGG_WORK_ENQUEUED, LAGG_WORK_IDLE);
2885: lw->lw_func(lw, lw->lw_arg);
2886: }
2887:
2888: struct workqueue *
2889: lagg_workq_create(const char *name, pri_t prio, int ipl, int flags)
2890: {
2891: struct workqueue *wq;
2892: int error;
2893:
2894: error = workqueue_create(&wq, name, lagg_workq_work,
2895: NULL, prio, ipl, flags);
2896:
2897: if (error)
2898: return NULL;
2899:
2900: return wq;
2901: }
2902:
/*
 * Tear down a workqueue created by lagg_workq_create().
 */
void
lagg_workq_destroy(struct workqueue *wq)
{

	workqueue_destroy(wq);
}
2909:
2910: void
2911: lagg_workq_add(struct workqueue *wq, struct lagg_work *lw)
2912: {
2913:
2914: if (atomic_cas_uint(&lw->lw_state, LAGG_WORK_IDLE,
2915: LAGG_WORK_ENQUEUED) != LAGG_WORK_IDLE)
2916: return;
2917:
2918: KASSERT(lw->lw_func != NULL);
2919: kpreempt_disable();
2920: workqueue_enqueue(wq, &lw->lw_cookie, NULL);
2921: kpreempt_enable();
2922: }
2923:
/*
 * Stop a piece of lagg work and wait for any queued invocation to
 * finish.  Setting LAGG_WORK_STOPPING also keeps lagg_workq_add()
 * from re-enqueueing it (that only transitions from IDLE).
 */
void
lagg_workq_wait(struct workqueue *wq, struct lagg_work *lw)
{

	atomic_swap_uint(&lw->lw_state, LAGG_WORK_STOPPING);
	workqueue_wait(wq, &lw->lw_cookie);
}
2931:
/*
 * Change the link-level address of ifp to "lla".
 *
 * First, every AF_LINK ifaddr whose sdl_type no longer matches
 * ifp->if_type (i.e. left over from before the interface type was
 * switched) is re-created with the current type and removed.  Then an
 * ifaddr carrying "lla" — reused if one was produced in the first
 * pass, freshly created otherwise — is activated as the interface's
 * link-level address, and the previously active one is removed
 * (unless it is the hardware address, if_hwdl, which must stay).
 *
 * Returns 0 or ENOMEM.  Called with IFNET_LOCK held, in thread
 * context only.
 */
static int
lagg_chg_sadl(struct ifnet *ifp, uint8_t *lla, size_t lla_len)
{
	struct psref psref_cur, psref_next;
	struct ifaddr *ifa_cur, *ifa_next, *ifa_lla;
	const struct sockaddr_dl *sdl, *nsdl;
	int s, error;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(ifp->if_addrlen == lla_len);

	error = 0;
	ifa_lla = NULL;

	/* replace AF_LINK addresses whose sdl_type is stale */
	while (1) {
		/* find one stale entry per pass, pinned with a psref */
		s = pserialize_read_enter();
		IFADDR_READER_FOREACH(ifa_cur, ifp) {
			sdl = satocsdl(ifa_cur->ifa_addr);
			if (sdl->sdl_family != AF_LINK)
				continue;

			if (sdl->sdl_type != ifp->if_type) {
				ifa_acquire(ifa_cur, &psref_cur);
				break;
			}
		}
		pserialize_read_exit(s);

		if (ifa_cur == NULL)
			break;

		/* re-create the entry with the current interface type */
		ifa_next = if_dl_create(ifp, &nsdl);
		if (ifa_next == NULL) {
			error = ENOMEM;
			ifa_release(ifa_cur, &psref_cur);
			goto done;
		}
		ifa_acquire(ifa_next, &psref_next);
		(void)sockaddr_dl_setaddr(__UNCONST(nsdl), nsdl->sdl_len,
		    CLLADDR(sdl), ifp->if_addrlen);
		ifa_insert(ifp, ifa_next);

		/* remember the replacement that already carries "lla" */
		if (ifa_lla == NULL &&
		    memcmp(CLLADDR(sdl), lla, lla_len) == 0) {
			ifa_lla = ifa_next;
			ifaref(ifa_lla);
		}

		if (ifa_cur == ifp->if_dl)
			if_activate_sadl(ifp, ifa_next, nsdl);

		/* hand the hardware-address role over to the new entry */
		if (ifa_cur == ifp->if_hwdl) {
			ifp->if_hwdl = ifa_next;
			ifaref(ifa_next);
			ifafree(ifa_cur);
		}

		/* drop the old entry; the extra ref keeps it valid across
		 * ifa_release() until the final ifafree() */
		ifaref(ifa_cur);
		ifa_release(ifa_cur, &psref_cur);
		ifa_remove(ifp, ifa_cur);
		KASSERTMSG(ifa_cur->ifa_refcnt == 1,
		    "ifa_refcnt=%d", ifa_cur->ifa_refcnt);
		ifafree(ifa_cur);
		ifa_release(ifa_next, &psref_next);
	}

	/* pick (or create) the ifaddr that carries the new lladdr */
	if (ifa_lla != NULL) {
		ifa_next = ifa_lla;

		ifa_acquire(ifa_next, &psref_next);
		ifafree(ifa_lla);

		nsdl = satocsdl(ifa_next->ifa_addr);
	} else {
		ifa_next = if_dl_create(ifp, &nsdl);
		if (ifa_next == NULL) {
			error = ENOMEM;
			goto done;
		}
		ifa_acquire(ifa_next, &psref_next);
		(void)sockaddr_dl_setaddr(__UNCONST(nsdl),
		    nsdl->sdl_len, lla, ifp->if_addrlen);
		ifa_insert(ifp, ifa_next);
	}

	/* activate it and retire the previously active address */
	if (ifa_next != ifp->if_dl) {
		ifa_cur = ifp->if_dl;
		if (ifa_cur != NULL)
			ifa_acquire(ifa_cur, &psref_cur);

		if_activate_sadl(ifp, ifa_next, nsdl);

		if (ifa_cur != NULL) {
			/* keep the hardware address entry around */
			if (ifa_cur != ifp->if_hwdl) {
				ifaref(ifa_cur);
				ifa_release(ifa_cur, &psref_cur);
				ifa_remove(ifp, ifa_cur);
				KASSERTMSG(ifa_cur->ifa_refcnt == 1,
				    "ifa_refcnt=%d",
				    ifa_cur->ifa_refcnt);
				ifafree(ifa_cur);
			} else {
				ifa_release(ifa_cur, &psref_cur);
			}
		}
	}

	ifa_release(ifa_next, &psref_next);

done:
	return error;
}
3045:
/*
 * Module infrastructure
 */
#include <net/if_module.h>

/* Register lagg(4) as a driver-class module with no extra dependencies. */
IF_MODULE(MODULE_CLASS_DRIVER, lagg, NULL)
CVSweb <webmaster@jp.NetBSD.org>