1 /*
2 * Copyright 2001 Wasabi Systems, Inc.
3 * All rights reserved.
5 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the NetBSD Project by
18 * Wasabi Systems, Inc.
19 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
20 * or promote products derived from this software without specific prior
21 * written permission.
23 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
37 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
38 * All rights reserved.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Jason L. Wright
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
57 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
63 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE.
66 * $OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp $
67 * $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $
68 * $FreeBSD: src/sys/net/if_bridge.c,v 1.26 2005/10/13 23:05:55 thompsa Exp $
69 * $DragonFly: src/sys/net/bridge/if_bridge.c,v 1.60 2008/11/26 12:49:43 sephe Exp $
73 * Network interface bridge support.
75 * TODO:
77 * - Currently only supports Ethernet-like interfaces (Ethernet,
78 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
79 * to bridge other types of interfaces (FDDI-FDDI, and maybe
80 * consider heterogeneous bridges).
83 * The bridge's route information is duplicated to each CPU:
85 * CPU0 CPU1 CPU2 CPU3
86 * +-----------+ +-----------+ +-----------+ +-----------+
87 * | rtnode | | rtnode | | rtnode | | rtnode |
88 * | | | | | | | |
89 * | dst eaddr | | dst eaddr | | dst eaddr | | dst eaddr |
90 * +-----------+ +-----------+ +-----------+ +-----------+
91 * | | | |
92 * | | | |
93 * | | +----------+ | |
94 * | | | rtinfo | | |
95 * | +---->| |<---+ |
96 * | | flags | |
97 * +-------------->| timeout |<-------------+
98 * | dst_ifp |
99 * +----------+
101 * We choose to put timeout and dst_ifp into the shared part, so updating
102 * them is cheaper than using message forwarding. There is also no need
103 * to use a spinlock to protect the updates: timeout and dst_ifp are
104 * unrelated, and the order in which a given field is updated does not
105 * matter. The cache pollution caused by the shared part should not be
106 * heavy: in a stable setup, dst_ifp will probably never change during an
107 * rtnode's lifetime, while timeout is refreshed once per second; most of
108 * the time, timeout and dst_ifp are only read.
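 *
 * A rough sketch of the relationship described above (field names are
 * taken from the code later in this file; types are approximate and
 * other fields such as the hash/list linkage are omitted -- see
 * net/bridge/if_bridgevar.h for the real definitions):
 *
 *	struct bridge_rtinfo {			(shared by all CPUs)
 *		struct ifnet	*bri_ifp;	(dst_ifp)
 *		unsigned long	bri_expire;	(timeout)
 *		uint8_t		bri_flags;
 *	};
 *
 *	struct bridge_rtnode {			(one copy per CPU)
 *		uint8_t		      brt_addr[ETHER_ADDR_LEN];	(dst eaddr)
 *		struct bridge_rtinfo *brt_info;	(points to the shared part)
 *	};
 *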
111 * Bridge route information installation on bridge_input path:
113 * CPU0 CPU1 CPU2 CPU3
115 * tcp_thread2
117 * alloc nmsg
118 * snd nmsg |
119 * w/o rtinfo |
120 * ifnet0<-----------------------+
121 * | :
122 * lookup dst :
123 * rtnode exists?(Y)free nmsg :
124 * |(N) :
126 * alloc rtinfo
127 * alloc rtnode
128 * install rtnode
130 * +---------->ifnet1
131 * : fwd nmsg |
132 * : w/ rtinfo |
133 * : |
134 * : |
135 * alloc rtnode
136 * (w/ nmsg's rtinfo)
137 * install rtnode
139 * +---------->ifnet2
140 * : fwd nmsg |
141 * : w/ rtinfo |
142 * : |
143 * : same as ifnet1
145 * +---------->ifnet3
146 * : fwd nmsg |
147 * : w/ rtinfo |
148 * : |
149 * : same as ifnet1
150 * free nmsg
154 * The netmsgs forwarded between protocol threads and ifnet threads are
155 * allocated with (M_WAITOK|M_NULLOK), so the allocation will not fail in
156 * most cases (route information is too precious not to be installed :).
157 * Since multiple threads may try to install route information for the
158 * same dst eaddr, we look up existing route information on ifnet0.
159 * This lookup only needs to be performed on ifnet0, which is the
160 * starting point of the route information installation process.
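 *
 * A condensed pseudo-C sketch of the installation path above
 * (illustrative only; the real code is bridge_rtupdate() and
 * bridge_rtinstall_handler() further below):
 *
 *	(protocol thread, e.g. tcp_thread2)
 *	brmsg = kmalloc(sizeof(*brmsg), ..., M_WAITOK | M_NULLOK);
 *	if (brmsg == NULL)
 *		return;					(rare, see above)
 *	netmsg_init(&brmsg->br_nmsg, ...);		(handler: bridge_rtinstall_handler)
 *	send brmsg to ifnet0's msgport;			(cpu0 is the starting point)
 *
 *	(ifnet thread on cpu N)
 *	bridge_rtinstall_oncpu(sc, brmsg->br_dst, brmsg->br_dst_if,
 *	    brmsg->br_setflags, brmsg->br_flags, &rtinfo);
 *	if (N == ncpus - 1)
 *		free brmsg;				(last cpu ends the chain)
 *	else
 *		forward brmsg to cpu N+1's ifnet msgport;
 *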
163 * Bridge route information deleting/flushing:
165 * CPU0 CPU1 CPU2 CPU3
167 * netisr0
169 * find suitable rtnodes,
170 * mark their rtinfo dead
172 * | domsg <------------------------------------------+
173 * | | replymsg
174 * | |
175 * V fwdmsg fwdmsg fwdmsg |
176 * ifnet0 --------> ifnet1 --------> ifnet2 --------> ifnet3
177 * delete rtnodes delete rtnodes delete rtnodes delete rtnodes
178 * w/ dead rtinfo w/ dead rtinfo w/ dead rtinfo w/ dead rtinfo
179 * free dead rtinfos
181 * All deleting/flushing operations are serialized by netisr0, so each
182 * operation only reaps the route information marked dead by itself.
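 *
 * In pseudo-C, the aging path above is roughly (illustrative; the real
 * code is bridge_rtage()/bridge_rtreap() and bridge_rtreap_handler()
 * below):
 *
 *	(netisr0)
 *	if (bridge_rtage_finddead(sc))		(mark timed-out rtinfos dead)
 *		bridge_rtreap(sc);		(domsg: cpu0 -> ... -> last cpu -> reply)
 *
 *	(each ifnet thread, in bridge_rtreap_handler)
 *	unlink and free every rtnode whose rtinfo is marked dead;
 *	the dead rtinfos themselves are freed once, at the end of the chain.
 *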
185 * Bridge route information adding/deleting/flushing:
186 * Since all operations are serialized by the fixed message flow between
187 * ifnet threads, it is not possible to create corrupted per-cpu route
188 * information.
192 * Percpu member interface list iteration with blocking operation:
193 * Since a bridge can only delete one member interface at a time and
194 * the deleted member interface is not freed until after netmsg_service_sync(),
195 * the following scheme is used to make sure that even if a member
196 * interface is ripped from the percpu list during the blocking operation,
197 * the iteration can still keep going:
199 * LIST_FOREACH_MUTABLE(bif, sc->sc_iflists[mycpuid], bif_next, nbif) {
200 * blocking operation;
201 * blocking operation;
202 * ...
203 * ...
204 *	if (nbif != NULL && !nbif->bif_onlist) {
205 *		KKASSERT(bif->bif_onlist);
206 *		nbif = LIST_NEXT(bif, bif_next);
207 *	}
208 * }
210 * As mentioned above, only one member interface can be unlinked from the
211 * percpu member interface list at a time, so either bif or nbif may not be
212 * on the list, but _not_ both. To keep the iteration going, we only care
213 * about nbif, not bif. Since a removed member interface will only be freed
214 * after we finish our work, it is safe to access any field in an unlinked
215 * bif (here bif_onlist). If nbif is no longer on the list, then bif must be
216 * on the list, so we change nbif to the next element of bif and keep going.
219 #include "opt_inet.h"
220 #include "opt_inet6.h"
222 #include <sys/param.h>
223 #include <sys/mbuf.h>
224 #include <sys/malloc.h>
225 #include <sys/protosw.h>
226 #include <sys/systm.h>
227 #include <sys/time.h>
228 #include <sys/socket.h> /* for net/if.h */
229 #include <sys/sockio.h>
230 #include <sys/ctype.h> /* string functions */
231 #include <sys/kernel.h>
232 #include <sys/random.h>
233 #include <sys/sysctl.h>
234 #include <sys/module.h>
235 #include <sys/proc.h>
236 #include <sys/priv.h>
237 #include <sys/lock.h>
238 #include <sys/thread.h>
239 #include <sys/thread2.h>
240 #include <sys/mpipe.h>
242 #include <net/bpf.h>
243 #include <net/if.h>
244 #include <net/if_dl.h>
245 #include <net/if_types.h>
246 #include <net/if_var.h>
247 #include <net/pfil.h>
248 #include <net/ifq_var.h>
249 #include <net/if_clone.h>
251 #include <netinet/in.h> /* for struct arpcom */
252 #include <netinet/in_systm.h>
253 #include <netinet/in_var.h>
254 #include <netinet/ip.h>
255 #include <netinet/ip_var.h>
256 #ifdef INET6
257 #include <netinet/ip6.h>
258 #include <netinet6/ip6_var.h>
259 #endif
260 #include <netinet/if_ether.h> /* for struct arpcom */
261 #include <net/bridge/if_bridgevar.h>
262 #include <net/if_llc.h>
263 #include <net/netmsg2.h>
265 #include <net/route.h>
266 #include <sys/in_cksum.h>
269 * Size of the route hash table. Must be a power of two.
271 #ifndef BRIDGE_RTHASH_SIZE
272 #define BRIDGE_RTHASH_SIZE 1024
273 #endif
275 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
278 * Maximum number of addresses to cache.
280 #ifndef BRIDGE_RTABLE_MAX
281 #define BRIDGE_RTABLE_MAX 100
282 #endif
285 * Spanning tree defaults.
287 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
288 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
289 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
290 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
291 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
292 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
293 #define BSTP_DEFAULT_PATH_COST 55
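/*
 * Note: the spanning tree times above are kept in units of 1/256 of a
 * second; the ioctl handlers below convert to and from whole seconds
 * with "<< 8" / ">> 8" (e.g. bridge_ioctl_sht()/bridge_ioctl_ght()).
 */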
296 * Timeout (in seconds) for entries learned dynamically.
298 #ifndef BRIDGE_RTABLE_TIMEOUT
299 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
300 #endif
303 * Number of seconds between walks of the route list.
305 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
306 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
307 #endif
310 * List of capabilities to mask on the member interface.
312 #define BRIDGE_IFCAPS_MASK IFCAP_TXCSUM
314 typedef int (*bridge_ctl_t)(struct bridge_softc *, void *);
316 struct netmsg_brctl {
317 struct netmsg bc_nmsg;
318 bridge_ctl_t bc_func;
319 struct bridge_softc *bc_sc;
320 void *bc_arg;
323 struct netmsg_brsaddr {
324 struct netmsg br_nmsg;
325 struct bridge_softc *br_softc;
326 struct ifnet *br_dst_if;
327 struct bridge_rtinfo *br_rtinfo;
328 int br_setflags;
329 uint8_t br_dst[ETHER_ADDR_LEN];
330 uint8_t br_flags;
333 struct netmsg_braddbif {
334 struct netmsg br_nmsg;
335 struct bridge_softc *br_softc;
336 struct bridge_ifinfo *br_bif_info;
337 struct ifnet *br_bif_ifp;
340 struct netmsg_brdelbif {
341 struct netmsg br_nmsg;
342 struct bridge_softc *br_softc;
343 struct bridge_ifinfo *br_bif_info;
344 struct bridge_iflist_head *br_bif_list;
347 struct netmsg_brsflags {
348 struct netmsg br_nmsg;
349 struct bridge_softc *br_softc;
350 struct bridge_ifinfo *br_bif_info;
351 uint32_t br_bif_flags;
354 eventhandler_tag bridge_detach_cookie = NULL;
356 extern struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
357 extern int (*bridge_output_p)(struct ifnet *, struct mbuf *);
358 extern void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
360 static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
362 static int bridge_clone_create(struct if_clone *, int);
363 static void bridge_clone_destroy(struct ifnet *);
365 static int bridge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
366 static void bridge_mutecaps(struct bridge_ifinfo *, struct ifnet *, int);
367 static void bridge_ifdetach(void *, struct ifnet *);
368 static void bridge_init(void *);
369 static void bridge_stop(struct ifnet *);
370 static void bridge_start(struct ifnet *);
371 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
372 static int bridge_output(struct ifnet *, struct mbuf *);
374 static void bridge_forward(struct bridge_softc *, struct mbuf *m);
376 static void bridge_timer_handler(struct netmsg *);
377 static void bridge_timer(void *);
379 static void bridge_start_bcast(struct bridge_softc *, struct mbuf *);
380 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
381 struct mbuf *);
382 static void bridge_span(struct bridge_softc *, struct mbuf *);
384 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
385 struct ifnet *, uint8_t);
386 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
387 static void bridge_rtreap(struct bridge_softc *);
388 static void bridge_rtreap_async(struct bridge_softc *);
389 static void bridge_rttrim(struct bridge_softc *);
390 static int bridge_rtage_finddead(struct bridge_softc *);
391 static void bridge_rtage(struct bridge_softc *);
392 static void bridge_rtflush(struct bridge_softc *, int);
393 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
394 static int bridge_rtsaddr(struct bridge_softc *, const uint8_t *,
395 struct ifnet *, uint8_t);
396 static void bridge_rtmsg_sync(struct bridge_softc *sc);
397 static void bridge_rtreap_handler(struct netmsg *);
398 static void bridge_rtinstall_handler(struct netmsg *);
399 static int bridge_rtinstall_oncpu(struct bridge_softc *, const uint8_t *,
400 struct ifnet *, int, uint8_t, struct bridge_rtinfo **);
402 static void bridge_rtable_init(struct bridge_softc *);
403 static void bridge_rtable_fini(struct bridge_softc *);
405 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
406 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
407 const uint8_t *);
408 static void bridge_rtnode_insert(struct bridge_softc *,
409 struct bridge_rtnode *);
410 static void bridge_rtnode_destroy(struct bridge_softc *,
411 struct bridge_rtnode *);
413 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
414 const char *name);
415 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
416 struct ifnet *ifp);
417 static struct bridge_iflist *bridge_lookup_member_ifinfo(struct bridge_softc *,
418 struct bridge_ifinfo *);
419 static void bridge_delete_member(struct bridge_softc *,
420 struct bridge_iflist *, int);
421 static void bridge_delete_span(struct bridge_softc *,
422 struct bridge_iflist *);
424 static int bridge_control(struct bridge_softc *, u_long,
425 bridge_ctl_t, void *);
426 static int bridge_ioctl_init(struct bridge_softc *, void *);
427 static int bridge_ioctl_stop(struct bridge_softc *, void *);
428 static int bridge_ioctl_add(struct bridge_softc *, void *);
429 static int bridge_ioctl_del(struct bridge_softc *, void *);
430 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
431 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
432 static int bridge_ioctl_scache(struct bridge_softc *, void *);
433 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
434 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
435 static int bridge_ioctl_rts(struct bridge_softc *, void *);
436 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
437 static int bridge_ioctl_sto(struct bridge_softc *, void *);
438 static int bridge_ioctl_gto(struct bridge_softc *, void *);
439 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
440 static int bridge_ioctl_flush(struct bridge_softc *, void *);
441 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
442 static int bridge_ioctl_spri(struct bridge_softc *, void *);
443 static int bridge_ioctl_ght(struct bridge_softc *, void *);
444 static int bridge_ioctl_sht(struct bridge_softc *, void *);
445 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
446 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
447 static int bridge_ioctl_gma(struct bridge_softc *, void *);
448 static int bridge_ioctl_sma(struct bridge_softc *, void *);
449 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
450 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
451 static int bridge_ioctl_addspan(struct bridge_softc *, void *);
452 static int bridge_ioctl_delspan(struct bridge_softc *, void *);
453 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
454 int);
455 static int bridge_ip_checkbasic(struct mbuf **mp);
456 #ifdef INET6
457 static int bridge_ip6_checkbasic(struct mbuf **mp);
458 #endif /* INET6 */
459 static int bridge_fragment(struct ifnet *, struct mbuf *,
460 struct ether_header *, int, struct llc *);
461 static void bridge_enqueue_handler(struct netmsg *);
462 static void bridge_handoff(struct ifnet *, struct mbuf *);
464 static void bridge_del_bif_handler(struct netmsg *);
465 static void bridge_add_bif_handler(struct netmsg *);
466 static void bridge_set_bifflags_handler(struct netmsg *);
467 static void bridge_del_bif(struct bridge_softc *, struct bridge_ifinfo *,
468 struct bridge_iflist_head *);
469 static void bridge_add_bif(struct bridge_softc *, struct bridge_ifinfo *,
470 struct ifnet *);
471 static void bridge_set_bifflags(struct bridge_softc *,
472 struct bridge_ifinfo *, uint32_t);
474 SYSCTL_DECL(_net_link);
475 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
477 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
478 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
479 static int pfil_member = 1; /* run pfil hooks on the member interface */
480 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
481 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
482 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
483 &pfil_bridge, 0, "Packet filter on the bridge interface");
484 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
485 &pfil_member, 0, "Packet filter on the member interface");
487 struct bridge_control_arg {
488 union {
489 struct ifbreq ifbreq;
490 struct ifbifconf ifbifconf;
491 struct ifbareq ifbareq;
492 struct ifbaconf ifbaconf;
493 struct ifbrparam ifbrparam;
494 } bca_u;
495 int bca_len;
496 void *bca_uptr;
497 void *bca_kptr;
500 struct bridge_control {
501 bridge_ctl_t bc_func;
502 int bc_argsize;
503 int bc_flags;
506 #define BC_F_COPYIN 0x01 /* copy arguments in */
507 #define BC_F_COPYOUT 0x02 /* copy arguments out */
508 #define BC_F_SUSER 0x04 /* do super-user check */
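/*
 * The table below is indexed by the ifd_cmd value passed in via
 * SIOCGDRVSPEC/SIOCSDRVSPEC; bridge_ioctl() bounds-checks that index
 * against bridge_control_table_size before dispatching.
 */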
510 const struct bridge_control bridge_control_table[] = {
511 { bridge_ioctl_add, sizeof(struct ifbreq),
512 BC_F_COPYIN|BC_F_SUSER },
513 { bridge_ioctl_del, sizeof(struct ifbreq),
514 BC_F_COPYIN|BC_F_SUSER },
516 { bridge_ioctl_gifflags, sizeof(struct ifbreq),
517 BC_F_COPYIN|BC_F_COPYOUT },
518 { bridge_ioctl_sifflags, sizeof(struct ifbreq),
519 BC_F_COPYIN|BC_F_SUSER },
521 { bridge_ioctl_scache, sizeof(struct ifbrparam),
522 BC_F_COPYIN|BC_F_SUSER },
523 { bridge_ioctl_gcache, sizeof(struct ifbrparam),
524 BC_F_COPYOUT },
526 { bridge_ioctl_gifs, sizeof(struct ifbifconf),
527 BC_F_COPYIN|BC_F_COPYOUT },
528 { bridge_ioctl_rts, sizeof(struct ifbaconf),
529 BC_F_COPYIN|BC_F_COPYOUT },
531 { bridge_ioctl_saddr, sizeof(struct ifbareq),
532 BC_F_COPYIN|BC_F_SUSER },
534 { bridge_ioctl_sto, sizeof(struct ifbrparam),
535 BC_F_COPYIN|BC_F_SUSER },
536 { bridge_ioctl_gto, sizeof(struct ifbrparam),
537 BC_F_COPYOUT },
539 { bridge_ioctl_daddr, sizeof(struct ifbareq),
540 BC_F_COPYIN|BC_F_SUSER },
542 { bridge_ioctl_flush, sizeof(struct ifbreq),
543 BC_F_COPYIN|BC_F_SUSER },
545 { bridge_ioctl_gpri, sizeof(struct ifbrparam),
546 BC_F_COPYOUT },
547 { bridge_ioctl_spri, sizeof(struct ifbrparam),
548 BC_F_COPYIN|BC_F_SUSER },
550 { bridge_ioctl_ght, sizeof(struct ifbrparam),
551 BC_F_COPYOUT },
552 { bridge_ioctl_sht, sizeof(struct ifbrparam),
553 BC_F_COPYIN|BC_F_SUSER },
555 { bridge_ioctl_gfd, sizeof(struct ifbrparam),
556 BC_F_COPYOUT },
557 { bridge_ioctl_sfd, sizeof(struct ifbrparam),
558 BC_F_COPYIN|BC_F_SUSER },
560 { bridge_ioctl_gma, sizeof(struct ifbrparam),
561 BC_F_COPYOUT },
562 { bridge_ioctl_sma, sizeof(struct ifbrparam),
563 BC_F_COPYIN|BC_F_SUSER },
565 { bridge_ioctl_sifprio, sizeof(struct ifbreq),
566 BC_F_COPYIN|BC_F_SUSER },
568 { bridge_ioctl_sifcost, sizeof(struct ifbreq),
569 BC_F_COPYIN|BC_F_SUSER },
571 { bridge_ioctl_addspan, sizeof(struct ifbreq),
572 BC_F_COPYIN|BC_F_SUSER },
573 { bridge_ioctl_delspan, sizeof(struct ifbreq),
574 BC_F_COPYIN|BC_F_SUSER },
576 static const int bridge_control_table_size =
577 sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
579 LIST_HEAD(, bridge_softc) bridge_list;
581 struct if_clone bridge_cloner = IF_CLONE_INITIALIZER("bridge",
582 bridge_clone_create,
583 bridge_clone_destroy, 0, IF_MAXUNIT);
585 static int
586 bridge_modevent(module_t mod, int type, void *data)
588 switch (type) {
589 case MOD_LOAD:
590 LIST_INIT(&bridge_list);
591 if_clone_attach(&bridge_cloner);
592 bridge_input_p = bridge_input;
593 bridge_output_p = bridge_output;
594 bridge_detach_cookie = EVENTHANDLER_REGISTER(
595 ifnet_detach_event, bridge_ifdetach, NULL,
596 EVENTHANDLER_PRI_ANY);
597 #if notyet
598 bstp_linkstate_p = bstp_linkstate;
599 #endif
600 break;
601 case MOD_UNLOAD:
602 if (!LIST_EMPTY(&bridge_list))
603 return (EBUSY);
604 EVENTHANDLER_DEREGISTER(ifnet_detach_event,
605 bridge_detach_cookie);
606 if_clone_detach(&bridge_cloner);
607 bridge_input_p = NULL;
608 bridge_output_p = NULL;
609 #if notyet
610 bstp_linkstate_p = NULL;
611 #endif
612 break;
613 default:
614 return (EOPNOTSUPP);
616 return (0);
619 static moduledata_t bridge_mod = {
620 "if_bridge",
621 bridge_modevent,
625 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
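/*
 * For reference, a bridge created by this cloner is normally set up from
 * userland with ifconfig(8), e.g. (illustrative):
 *
 *	ifconfig bridge0 create
 *	ifconfig bridge0 addm em0 addm em1 up
 *
 * which exercises bridge_clone_create() and the SIOCSDRVSPEC add path
 * (bridge_ioctl_add()) below.
 */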
629 * bridge_clone_create:
631 * Create a new bridge instance.
633 static int
634 bridge_clone_create(struct if_clone *ifc, int unit)
636 struct bridge_softc *sc;
637 struct ifnet *ifp;
638 u_char eaddr[6];
639 int cpu, rnd;
641 sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
642 ifp = sc->sc_ifp = &sc->sc_if;
644 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
645 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
646 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
647 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
648 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
649 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
650 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
652 /* Initialize our routing table. */
653 bridge_rtable_init(sc);
655 callout_init(&sc->sc_brcallout);
656 netmsg_init(&sc->sc_brtimemsg, NULL, &netisr_adone_rport,
657 MSGF_DROPABLE, bridge_timer_handler);
658 sc->sc_brtimemsg.nm_lmsg.u.ms_resultp = sc;
660 callout_init(&sc->sc_bstpcallout);
661 netmsg_init(&sc->sc_bstptimemsg, NULL, &netisr_adone_rport,
662 MSGF_DROPABLE, bstp_tick_handler);
663 sc->sc_bstptimemsg.nm_lmsg.u.ms_resultp = sc;
665 /* Initialize per-cpu member iface lists */
666 sc->sc_iflists = kmalloc(sizeof(*sc->sc_iflists) * ncpus,
667 M_DEVBUF, M_WAITOK);
668 for (cpu = 0; cpu < ncpus; ++cpu)
669 LIST_INIT(&sc->sc_iflists[cpu]);
671 LIST_INIT(&sc->sc_spanlist);
673 ifp->if_softc = sc;
674 if_initname(ifp, ifc->ifc_name, unit);
675 ifp->if_mtu = ETHERMTU;
676 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
677 ifp->if_ioctl = bridge_ioctl;
678 ifp->if_start = bridge_start;
679 ifp->if_init = bridge_init;
680 ifp->if_type = IFT_BRIDGE;
681 ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
682 ifq_set_ready(&ifp->if_snd);
683 ifp->if_hdrlen = ETHER_HDR_LEN;
686 * Generate a random Ethernet address and mark it as a locally
687 * administered unicast address.
689 rnd = karc4random();
690 bcopy(&rnd, &eaddr[0], 4); /* ETHER_ADDR_LEN == 6 */
691 rnd = karc4random();
692 bcopy(&rnd, &eaddr[2], 4); /* ETHER_ADDR_LEN == 6 */
694 eaddr[0] &= ~1; /* clear multicast bit */
695 eaddr[0] |= 2; /* set the LAA bit */
697 ether_ifattach(ifp, eaddr, NULL);
698 /* Now undo some of the damage... */
699 ifp->if_baudrate = 0;
700 ifp->if_type = IFT_BRIDGE;
702 crit_enter(); /* XXX MP */
703 LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
704 crit_exit();
706 return (0);
709 static void
710 bridge_delete_dispatch(struct netmsg *nmsg)
712 struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
713 struct bridge_softc *sc = lmsg->u.ms_resultp;
714 struct ifnet *bifp = sc->sc_ifp;
715 struct bridge_iflist *bif;
717 ifnet_serialize_all(bifp);
719 while ((bif = LIST_FIRST(&sc->sc_iflists[mycpuid])) != NULL)
720 bridge_delete_member(sc, bif, 0);
722 while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL)
723 bridge_delete_span(sc, bif);
725 ifnet_deserialize_all(bifp);
727 lwkt_replymsg(lmsg, 0);
731 * bridge_clone_destroy:
733 * Destroy a bridge instance.
735 static void
736 bridge_clone_destroy(struct ifnet *ifp)
738 struct bridge_softc *sc = ifp->if_softc;
739 struct lwkt_msg *lmsg;
740 struct netmsg nmsg;
742 ifnet_serialize_all(ifp);
744 bridge_stop(ifp);
745 ifp->if_flags &= ~IFF_UP;
747 ifnet_deserialize_all(ifp);
749 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
750 0, bridge_delete_dispatch);
751 lmsg = &nmsg.nm_lmsg;
752 lmsg->u.ms_resultp = sc;
753 lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
755 crit_enter(); /* XXX MP */
756 LIST_REMOVE(sc, sc_list);
757 crit_exit();
759 ether_ifdetach(ifp);
761 /* Tear down the routing table. */
762 bridge_rtable_fini(sc);
764 /* Free per-cpu member iface lists */
765 kfree(sc->sc_iflists, M_DEVBUF);
767 kfree(sc, M_DEVBUF);
771 * bridge_ioctl:
773 * Handle a control request from the operator.
775 static int
776 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
778 struct bridge_softc *sc = ifp->if_softc;
779 struct bridge_control_arg args;
780 struct ifdrv *ifd = (struct ifdrv *) data;
781 const struct bridge_control *bc;
782 int error = 0;
784 ASSERT_IFNET_SERIALIZED_ALL(ifp);
786 switch (cmd) {
787 case SIOCADDMULTI:
788 case SIOCDELMULTI:
789 break;
791 case SIOCGDRVSPEC:
792 case SIOCSDRVSPEC:
793 if (ifd->ifd_cmd >= bridge_control_table_size) {
794 error = EINVAL;
795 break;
797 bc = &bridge_control_table[ifd->ifd_cmd];
799 if (cmd == SIOCGDRVSPEC &&
800 (bc->bc_flags & BC_F_COPYOUT) == 0) {
801 error = EINVAL;
802 break;
803 } else if (cmd == SIOCSDRVSPEC &&
804 (bc->bc_flags & BC_F_COPYOUT)) {
805 error = EINVAL;
806 break;
809 if (bc->bc_flags & BC_F_SUSER) {
810 error = priv_check_cred(cr, PRIV_ROOT, NULL_CRED_OKAY);
811 if (error)
812 break;
815 if (ifd->ifd_len != bc->bc_argsize ||
816 ifd->ifd_len > sizeof(args.bca_u)) {
817 error = EINVAL;
818 break;
821 memset(&args, 0, sizeof(args));
822 if (bc->bc_flags & BC_F_COPYIN) {
823 error = copyin(ifd->ifd_data, &args.bca_u,
824 ifd->ifd_len);
825 if (error)
826 break;
829 error = bridge_control(sc, cmd, bc->bc_func, &args);
830 if (error) {
831 KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
832 break;
835 if (bc->bc_flags & BC_F_COPYOUT) {
836 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
837 if (args.bca_len != 0) {
838 KKASSERT(args.bca_kptr != NULL);
839 if (!error) {
840 error = copyout(args.bca_kptr,
841 args.bca_uptr, args.bca_len);
843 kfree(args.bca_kptr, M_TEMP);
844 } else {
845 KKASSERT(args.bca_kptr == NULL);
847 } else {
848 KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
850 break;
852 case SIOCSIFFLAGS:
853 if (!(ifp->if_flags & IFF_UP) &&
854 (ifp->if_flags & IFF_RUNNING)) {
856 * If interface is marked down and it is running,
857 * then stop it.
859 bridge_stop(ifp);
860 } else if ((ifp->if_flags & IFF_UP) &&
861 !(ifp->if_flags & IFF_RUNNING)) {
863 * If interface is marked up and it is stopped, then
864 * start it.
866 ifp->if_init(sc);
868 break;
870 case SIOCSIFMTU:
871 /* Do not allow the MTU to be changed on the bridge */
872 error = EINVAL;
873 break;
875 default:
876 error = ether_ioctl(ifp, cmd, data);
877 break;
879 return (error);
883 * bridge_mutecaps:
885 * Clear or restore unwanted capabilities on the member interface
887 static void
888 bridge_mutecaps(struct bridge_ifinfo *bif_info, struct ifnet *ifp, int mute)
890 struct ifreq ifr;
891 int error;
893 if (ifp->if_ioctl == NULL)
894 return;
896 bzero(&ifr, sizeof(ifr));
897 ifr.ifr_reqcap = ifp->if_capenable;
899 if (mute) {
900 /* mask off and save capabilities */
901 bif_info->bifi_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
902 if (bif_info->bifi_mutecap != 0)
903 ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
904 } else {
905 /* restore muted capabilities */
906 ifr.ifr_reqcap |= bif_info->bifi_mutecap;
909 if (bif_info->bifi_mutecap != 0) {
910 ifnet_serialize_all(ifp);
911 error = ifp->if_ioctl(ifp, SIOCSIFCAP, (caddr_t)&ifr, NULL);
912 ifnet_deserialize_all(ifp);
917 * bridge_lookup_member:
919 * Lookup a bridge member interface.
921 static struct bridge_iflist *
922 bridge_lookup_member(struct bridge_softc *sc, const char *name)
924 struct bridge_iflist *bif;
926 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
927 if (strcmp(bif->bif_ifp->if_xname, name) == 0)
928 return (bif);
930 return (NULL);
934 * bridge_lookup_member_if:
936 * Lookup a bridge member interface by ifnet*.
938 static struct bridge_iflist *
939 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
941 struct bridge_iflist *bif;
943 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
944 if (bif->bif_ifp == member_ifp)
945 return (bif);
947 return (NULL);
951 * bridge_lookup_member_ifinfo:
953 * Lookup a bridge member interface by bridge_ifinfo.
955 static struct bridge_iflist *
956 bridge_lookup_member_ifinfo(struct bridge_softc *sc,
957 struct bridge_ifinfo *bif_info)
959 struct bridge_iflist *bif;
961 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
962 if (bif->bif_info == bif_info)
963 return (bif);
965 return (NULL);
969 * bridge_delete_member:
971 * Delete the specified member interface.
973 static void
974 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
975 int gone)
977 struct ifnet *ifs = bif->bif_ifp;
978 struct ifnet *bifp = sc->sc_ifp;
979 struct bridge_ifinfo *bif_info = bif->bif_info;
980 struct bridge_iflist_head saved_bifs;
982 ASSERT_IFNET_SERIALIZED_ALL(bifp);
983 KKASSERT(bif_info != NULL);
985 ifs->if_bridge = NULL;
988 * Release the bridge interface's serializer:
989 * - To avoid a possible deadlock.
990 * - Various sync operations will block the current thread.
992 ifnet_deserialize_all(bifp);
994 if (!gone) {
995 switch (ifs->if_type) {
996 case IFT_ETHER:
997 case IFT_L2VLAN:
999 * Take the interface out of promiscuous mode.
1001 ifpromisc(ifs, 0);
1002 bridge_mutecaps(bif_info, ifs, 0);
1003 break;
1005 case IFT_GIF:
1006 break;
1008 default:
1009 panic("bridge_delete_member: impossible");
1010 break;
1015 * Remove bifs from percpu linked list.
1017 * Removed bifs are not freed immediately, instead,
1018 * they are saved in saved_bifs. They will be freed
1019 * after we make sure that no one is accessing them,
1020 * i.e. after following netmsg_service_sync()
1022 LIST_INIT(&saved_bifs);
1023 bridge_del_bif(sc, bif_info, &saved_bifs);
1026 * Make sure that all protocol threads:
1027 * o see 'ifs' if_bridge is changed
1028 * o know that bif is removed from the percpu linked list
1030 netmsg_service_sync();
1033 * Free the removed bifs
1035 KKASSERT(!LIST_EMPTY(&saved_bifs));
1036 while ((bif = LIST_FIRST(&saved_bifs)) != NULL) {
1037 LIST_REMOVE(bif, bif_next);
1038 kfree(bif, M_DEVBUF);
1041 /* See the comment in bridge_ioctl_stop() */
1042 bridge_rtmsg_sync(sc);
1043 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL | IFBF_FLUSHSYNC);
1045 ifnet_serialize_all(bifp);
1047 if (bifp->if_flags & IFF_RUNNING)
1048 bstp_initialization(sc);
1051 * Free the bif_info after bstp_initialization(), so that
1052 * bridge_softc.sc_root_port will not reference a dangling
1053 * pointer.
1055 kfree(bif_info, M_DEVBUF);
1059 * bridge_delete_span:
1061 * Delete the specified span interface.
1063 static void
1064 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1066 KASSERT(bif->bif_ifp->if_bridge == NULL,
1067 ("%s: not a span interface", __func__));
1069 LIST_REMOVE(bif, bif_next);
1070 kfree(bif, M_DEVBUF);
1073 static int
1074 bridge_ioctl_init(struct bridge_softc *sc, void *arg __unused)
1076 struct ifnet *ifp = sc->sc_ifp;
1078 if (ifp->if_flags & IFF_RUNNING)
1079 return 0;
1081 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1082 bridge_timer, sc);
1084 ifp->if_flags |= IFF_RUNNING;
1085 bstp_initialization(sc);
1086 return 0;
1089 static int
1090 bridge_ioctl_stop(struct bridge_softc *sc, void *arg __unused)
1092 struct ifnet *ifp = sc->sc_ifp;
1093 struct lwkt_msg *lmsg;
1095 if ((ifp->if_flags & IFF_RUNNING) == 0)
1096 return 0;
1098 callout_stop(&sc->sc_brcallout);
1100 crit_enter();
1101 lmsg = &sc->sc_brtimemsg.nm_lmsg;
1102 if ((lmsg->ms_flags & MSGF_DONE) == 0) {
1103 /* Pending to be processed; drop it */
1104 lwkt_dropmsg(lmsg);
1106 crit_exit();
1108 bstp_stop(sc);
1110 ifp->if_flags &= ~IFF_RUNNING;
1112 ifnet_deserialize_all(ifp);
1114 /* Let everyone know that we are stopped */
1115 netmsg_service_sync();
1118 * Sync ifnetX msgports in the order we forward rtnode
1119 * installation message. This is used to make sure that
1120 * all rtnode installation messages sent by bridge_rtupdate()
1121 * during above netmsg_service_sync() are flushed.
1123 bridge_rtmsg_sync(sc);
1124 bridge_rtflush(sc, IFBF_FLUSHDYN | IFBF_FLUSHSYNC);
1126 ifnet_serialize_all(ifp);
1127 return 0;
1130 static int
1131 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1133 struct ifbreq *req = arg;
1134 struct bridge_iflist *bif;
1135 struct bridge_ifinfo *bif_info;
1136 struct ifnet *ifs, *bifp;
1137 int error = 0;
1139 bifp = sc->sc_ifp;
1140 ASSERT_IFNET_SERIALIZED_ALL(bifp);
1142 ifs = ifunit(req->ifbr_ifsname);
1143 if (ifs == NULL)
1144 return (ENOENT);
1146 /* If it's in the span list, it can't be a member. */
1147 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1148 if (ifs == bif->bif_ifp)
1149 return (EBUSY);
1151 /* Allow the first Ethernet member to define the MTU */
1152 if (ifs->if_type != IFT_GIF) {
1153 if (LIST_EMPTY(&sc->sc_iflists[mycpuid])) {
1154 bifp->if_mtu = ifs->if_mtu;
1155 } else if (bifp->if_mtu != ifs->if_mtu) {
1156 if_printf(bifp, "invalid MTU for %s\n", ifs->if_xname);
1157 return (EINVAL);
1161 if (ifs->if_bridge == sc)
1162 return (EEXIST);
1164 if (ifs->if_bridge != NULL)
1165 return (EBUSY);
1167 bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1168 bif_info->bifi_priority = BSTP_DEFAULT_PORT_PRIORITY;
1169 bif_info->bifi_path_cost = BSTP_DEFAULT_PATH_COST;
1170 bif_info->bifi_ifp = ifs;
1173 * Release the bridge interface's serializer:
1174 * - To avoid a possible deadlock.
1175 * - Various sync operations will block the current thread.
1177 ifnet_deserialize_all(bifp);
1179 switch (ifs->if_type) {
1180 case IFT_ETHER:
1181 case IFT_L2VLAN:
1183 * Place the interface into promiscuous mode.
1185 error = ifpromisc(ifs, 1);
1186 if (error) {
1187 ifnet_serialize_all(bifp);
1188 goto out;
1190 bridge_mutecaps(bif_info, ifs, 1);
1191 break;
1193 case IFT_GIF: /* :^) */
1194 break;
1196 default:
1197 error = EINVAL;
1198 ifnet_serialize_all(bifp);
1199 goto out;
1203 * Add bifs to percpu linked lists
1205 bridge_add_bif(sc, bif_info, ifs);
1207 ifnet_serialize_all(bifp);
1209 if (bifp->if_flags & IFF_RUNNING)
1210 bstp_initialization(sc);
1211 else
1212 bstp_stop(sc);
1215 * Everything has been setup, so let the member interface
1216 * deliver packets to this bridge on its input/output path.
1218 ifs->if_bridge = sc;
1219 out:
1220 if (error) {
1221 if (bif_info != NULL)
1222 kfree(bif_info, M_DEVBUF);
1224 return (error);
1227 static int
1228 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1230 struct ifbreq *req = arg;
1231 struct bridge_iflist *bif;
1233 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1234 if (bif == NULL)
1235 return (ENOENT);
1237 bridge_delete_member(sc, bif, 0);
1239 return (0);
1242 static int
1243 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1245 struct ifbreq *req = arg;
1246 struct bridge_iflist *bif;
1248 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1249 if (bif == NULL)
1250 return (ENOENT);
1252 req->ifbr_ifsflags = bif->bif_flags;
1253 req->ifbr_state = bif->bif_state;
1254 req->ifbr_priority = bif->bif_priority;
1255 req->ifbr_path_cost = bif->bif_path_cost;
1256 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1258 return (0);
1261 static int
1262 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1264 struct ifbreq *req = arg;
1265 struct bridge_iflist *bif;
1266 struct ifnet *bifp = sc->sc_ifp;
1268 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1269 if (bif == NULL)
1270 return (ENOENT);
1272 if (req->ifbr_ifsflags & IFBIF_SPAN) {
1273 /* SPAN is readonly */
1274 return (EINVAL);
1277 if (req->ifbr_ifsflags & IFBIF_STP) {
1278 switch (bif->bif_ifp->if_type) {
1279 case IFT_ETHER:
1280 /* These can do spanning tree. */
1281 break;
1283 default:
1284 /* Nothing else can. */
1285 return (EINVAL);
1289 ifnet_deserialize_all(bifp);
1290 bridge_set_bifflags(sc, bif->bif_info, req->ifbr_ifsflags);
1291 ifnet_serialize_all(bifp);
1293 if (bifp->if_flags & IFF_RUNNING)
1294 bstp_initialization(sc);
1296 return (0);
1299 static int
1300 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1302 struct ifbrparam *param = arg;
1303 struct ifnet *ifp = sc->sc_ifp;
1305 sc->sc_brtmax = param->ifbrp_csize;
1307 ifnet_deserialize_all(ifp);
1308 bridge_rttrim(sc);
1309 ifnet_serialize_all(ifp);
1311 return (0);
1314 static int
1315 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1317 struct ifbrparam *param = arg;
1319 param->ifbrp_csize = sc->sc_brtmax;
1321 return (0);
1324 static int
1325 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1327 struct bridge_control_arg *bc_arg = arg;
1328 struct ifbifconf *bifc = arg;
1329 struct bridge_iflist *bif;
1330 struct ifbreq *breq;
1331 int count, len;
1333 count = 0;
1334 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next)
1335 count++;
1336 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1337 count++;
1339 if (bifc->ifbic_len == 0) {
1340 bifc->ifbic_len = sizeof(*breq) * count;
1341 return 0;
1342 } else if (count == 0 || bifc->ifbic_len < sizeof(*breq)) {
1343 bifc->ifbic_len = 0;
1344 return 0;
1347 len = min(bifc->ifbic_len, sizeof(*breq) * count);
1348 KKASSERT(len >= sizeof(*breq));
1350 breq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1351 if (breq == NULL) {
1352 bifc->ifbic_len = 0;
1353 return ENOMEM;
1355 bc_arg->bca_kptr = breq;
1357 count = 0;
1358 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1359 if (len < sizeof(*breq))
1360 break;
1362 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1363 sizeof(breq->ifbr_ifsname));
1364 breq->ifbr_ifsflags = bif->bif_flags;
1365 breq->ifbr_state = bif->bif_state;
1366 breq->ifbr_priority = bif->bif_priority;
1367 breq->ifbr_path_cost = bif->bif_path_cost;
1368 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1369 breq++;
1370 count++;
1371 len -= sizeof(*breq);
1373 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1374 if (len < sizeof(*breq))
1375 break;
1377 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1378 sizeof(breq->ifbr_ifsname));
1379 breq->ifbr_ifsflags = bif->bif_flags;
1380 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1381 breq++;
1382 count++;
1383 len -= sizeof(*breq);
1386 bifc->ifbic_len = sizeof(*breq) * count;
1387 KKASSERT(bifc->ifbic_len > 0);
1389 bc_arg->bca_len = bifc->ifbic_len;
1390 bc_arg->bca_uptr = bifc->ifbic_req;
1391 return 0;
1394 static int
1395 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1397 struct bridge_control_arg *bc_arg = arg;
1398 struct ifbaconf *bac = arg;
1399 struct bridge_rtnode *brt;
1400 struct ifbareq *bareq;
1401 int count, len;
1403 count = 0;
1404 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list)
1405 count++;
1407 if (bac->ifbac_len == 0) {
1408 bac->ifbac_len = sizeof(*bareq) * count;
1409 return 0;
1410 } else if (count == 0 || bac->ifbac_len < sizeof(*bareq)) {
1411 bac->ifbac_len = 0;
1412 return 0;
1415 len = min(bac->ifbac_len, sizeof(*bareq) * count);
1416 KKASSERT(len >= sizeof(*bareq));
1418 bareq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1419 if (bareq == NULL) {
1420 bac->ifbac_len = 0;
1421 return ENOMEM;
1423 bc_arg->bca_kptr = bareq;
1425 count = 0;
1426 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
1427 struct bridge_rtinfo *bri = brt->brt_info;
1428 unsigned long expire;
1430 if (len < sizeof(*bareq))
1431 break;
1433 strlcpy(bareq->ifba_ifsname, bri->bri_ifp->if_xname,
1434 sizeof(bareq->ifba_ifsname));
1435 memcpy(bareq->ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1436 expire = bri->bri_expire;
1437 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1438 time_second < expire)
1439 bareq->ifba_expire = expire - time_second;
1440 else
1441 bareq->ifba_expire = 0;
1442 bareq->ifba_flags = bri->bri_flags;
1443 bareq++;
1444 count++;
1445 len -= sizeof(*bareq);
1448 bac->ifbac_len = sizeof(*bareq) * count;
1449 KKASSERT(bac->ifbac_len > 0);
1451 bc_arg->bca_len = bac->ifbac_len;
1452 bc_arg->bca_uptr = bac->ifbac_req;
1453 return 0;
1456 static int
1457 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1459 struct ifbareq *req = arg;
1460 struct bridge_iflist *bif;
1461 struct ifnet *ifp = sc->sc_ifp;
1462 int error;
1464 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1466 bif = bridge_lookup_member(sc, req->ifba_ifsname);
1467 if (bif == NULL)
1468 return (ENOENT);
1470 ifnet_deserialize_all(ifp);
1471 error = bridge_rtsaddr(sc, req->ifba_dst, bif->bif_ifp,
1472 req->ifba_flags);
1473 ifnet_serialize_all(ifp);
1474 return (error);
1477 static int
1478 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1480 struct ifbrparam *param = arg;
1482 sc->sc_brttimeout = param->ifbrp_ctime;
1484 return (0);
1487 static int
1488 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1490 struct ifbrparam *param = arg;
1492 param->ifbrp_ctime = sc->sc_brttimeout;
1494 return (0);
1497 static int
1498 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1500 struct ifbareq *req = arg;
1501 struct ifnet *ifp = sc->sc_ifp;
1502 int error;
1504 ifnet_deserialize_all(ifp);
1505 error = bridge_rtdaddr(sc, req->ifba_dst);
1506 ifnet_serialize_all(ifp);
1507 return error;
1510 static int
1511 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1513 struct ifbreq *req = arg;
1514 struct ifnet *ifp = sc->sc_ifp;
1516 ifnet_deserialize_all(ifp);
1517 bridge_rtflush(sc, req->ifbr_ifsflags | IFBF_FLUSHSYNC);
1518 ifnet_serialize_all(ifp);
1520 return (0);
1523 static int
1524 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1526 struct ifbrparam *param = arg;
1528 param->ifbrp_prio = sc->sc_bridge_priority;
1530 return (0);
1533 static int
1534 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1536 struct ifbrparam *param = arg;
1538 sc->sc_bridge_priority = param->ifbrp_prio;
1540 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1541 bstp_initialization(sc);
1543 return (0);
1546 static int
1547 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1549 struct ifbrparam *param = arg;
1551 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1553 return (0);
1556 static int
1557 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1559 struct ifbrparam *param = arg;
1561 if (param->ifbrp_hellotime == 0)
1562 return (EINVAL);
1563 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1565 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1566 bstp_initialization(sc);
1568 return (0);
1571 static int
1572 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1574 struct ifbrparam *param = arg;
1576 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1578 return (0);
1581 static int
1582 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1584 struct ifbrparam *param = arg;
1586 if (param->ifbrp_fwddelay == 0)
1587 return (EINVAL);
1588 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1590 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1591 bstp_initialization(sc);
1593 return (0);
1596 static int
1597 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1599 struct ifbrparam *param = arg;
1601 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1603 return (0);
1606 static int
1607 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1609 struct ifbrparam *param = arg;
1611 if (param->ifbrp_maxage == 0)
1612 return (EINVAL);
1613 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1615 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1616 bstp_initialization(sc);
1618 return (0);
1621 static int
1622 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1624 struct ifbreq *req = arg;
1625 struct bridge_iflist *bif;
1627 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1628 if (bif == NULL)
1629 return (ENOENT);
1631 bif->bif_priority = req->ifbr_priority;
1633 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1634 bstp_initialization(sc);
1636 return (0);
1639 static int
1640 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1642 struct ifbreq *req = arg;
1643 struct bridge_iflist *bif;
1645 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1646 if (bif == NULL)
1647 return (ENOENT);
1649 bif->bif_path_cost = req->ifbr_path_cost;
1651 if (sc->sc_ifp->if_flags & IFF_RUNNING)
1652 bstp_initialization(sc);
1654 return (0);
1657 static int
1658 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1660 struct ifbreq *req = arg;
1661 struct bridge_iflist *bif;
1662 struct ifnet *ifs;
1664 ifs = ifunit(req->ifbr_ifsname);
1665 if (ifs == NULL)
1666 return (ENOENT);
1668 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1669 if (ifs == bif->bif_ifp)
1670 return (EBUSY);
1672 if (ifs->if_bridge != NULL)
1673 return (EBUSY);
1675 switch (ifs->if_type) {
1676 case IFT_ETHER:
1677 case IFT_GIF:
1678 case IFT_L2VLAN:
1679 break;
1681 default:
1682 return (EINVAL);
1685 bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
1686 bif->bif_ifp = ifs;
1687 bif->bif_flags = IFBIF_SPAN;
1688 /* NOTE: span bif does not need bridge_ifinfo */
1690 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1692 sc->sc_span = 1;
1694 return (0);
1697 static int
1698 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1700 struct ifbreq *req = arg;
1701 struct bridge_iflist *bif;
1702 struct ifnet *ifs;
1704 ifs = ifunit(req->ifbr_ifsname);
1705 if (ifs == NULL)
1706 return (ENOENT);
1708 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1709 if (ifs == bif->bif_ifp)
1710 break;
1712 if (bif == NULL)
1713 return (ENOENT);
1715 bridge_delete_span(sc, bif);
1717 if (LIST_EMPTY(&sc->sc_spanlist))
1718 sc->sc_span = 0;
1720 return (0);
1723 static void
1724 bridge_ifdetach_dispatch(struct netmsg *nmsg)
1726 struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
1727 struct ifnet *ifp, *bifp;
1728 struct bridge_softc *sc;
1729 struct bridge_iflist *bif;
1731 ifp = lmsg->u.ms_resultp;
1732 sc = ifp->if_bridge;
1734 /* Check if the interface is a bridge member */
1735 if (sc != NULL) {
1736 bifp = sc->sc_ifp;
1738 ifnet_serialize_all(bifp);
1740 bif = bridge_lookup_member_if(sc, ifp);
1741 if (bif != NULL) {
1742 bridge_delete_member(sc, bif, 1);
1743 } else {
1744 /* XXX Why would bif be NULL here? */
1747 ifnet_deserialize_all(bifp);
1748 goto reply;
1751 crit_enter(); /* XXX MP */
1753 /* Check if the interface is a span port */
1754 LIST_FOREACH(sc, &bridge_list, sc_list) {
1755 bifp = sc->sc_ifp;
1757 ifnet_serialize_all(bifp);
1759 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1760 if (ifp == bif->bif_ifp) {
1761 bridge_delete_span(sc, bif);
1762 break;
1765 ifnet_deserialize_all(bifp);
1768 crit_exit();
1770 reply:
1771 lwkt_replymsg(lmsg, 0);
1775 * bridge_ifdetach:
1777 * Detach an interface from a bridge. Called when a member
1778 * interface is detaching.
1780 static void
1781 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1783 struct lwkt_msg *lmsg;
1784 struct netmsg nmsg;
1786 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1787 0, bridge_ifdetach_dispatch);
1788 lmsg = &nmsg.nm_lmsg;
1789 lmsg->u.ms_resultp = ifp;
1791 lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
1795 * bridge_init:
1797 * Initialize a bridge interface.
1799 static void
1800 bridge_init(void *xsc)
1802 bridge_control(xsc, SIOCSIFFLAGS, bridge_ioctl_init, NULL);
1806 * bridge_stop:
1808 * Stop the bridge interface.
1810 static void
1811 bridge_stop(struct ifnet *ifp)
1813 bridge_control(ifp->if_softc, SIOCSIFFLAGS, bridge_ioctl_stop, NULL);
1817 * bridge_enqueue:
1819 * Enqueue a packet on a bridge member interface.
1822 void
1823 bridge_enqueue(struct ifnet *dst_ifp, struct mbuf *m)
1825 struct netmsg_packet *nmp;
1827 nmp = &m->m_hdr.mh_netmsg;
1828 netmsg_init(&nmp->nm_netmsg, NULL, &netisr_apanic_rport,
1829 0, bridge_enqueue_handler);
1830 nmp->nm_packet = m;
1831 nmp->nm_netmsg.nm_lmsg.u.ms_resultp = dst_ifp;
1833 lwkt_sendmsg(curnetport, &nmp->nm_netmsg.nm_lmsg);
1837 * bridge_output:
1839 * Send output from a bridge member interface. This
1840 * performs the bridging function for locally originated
1841 * packets.
1843 * The mbuf has the Ethernet header already attached. We must
1844 * enqueue or free the mbuf before returning.
1846 static int
1847 bridge_output(struct ifnet *ifp, struct mbuf *m)
1849 struct bridge_softc *sc = ifp->if_bridge;
1850 struct ether_header *eh;
1851 struct ifnet *dst_if, *bifp;
1853 ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
1856 * Make sure that we are still a member of a bridge interface.
1858 if (sc == NULL) {
1859 m_freem(m);
1860 return (0);
1862 bifp = sc->sc_ifp;
1864 if (m->m_len < ETHER_HDR_LEN) {
1865 m = m_pullup(m, ETHER_HDR_LEN);
1866 if (m == NULL)
1867 return (0);
1869 eh = mtod(m, struct ether_header *);
1872 * If bridge is down, but the original output interface is up,
1873 * go ahead and send out that interface. Otherwise, the packet
1874 * is dropped below.
1876 if ((bifp->if_flags & IFF_RUNNING) == 0) {
1877 dst_if = ifp;
1878 goto sendunicast;
1882 * If the packet is a multicast, or we don't know a better way to
1883 * get there, send to all interfaces.
1885 if (ETHER_IS_MULTICAST(eh->ether_dhost))
1886 dst_if = NULL;
1887 else
1888 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1889 if (dst_if == NULL) {
1890 struct bridge_iflist *bif, *nbif;
1891 struct mbuf *mc;
1892 int used = 0;
1894 if (sc->sc_span)
1895 bridge_span(sc, m);
1897 LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid],
1898 bif_next, nbif) {
1899 dst_if = bif->bif_ifp;
1900 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1901 continue;
1904 * If this is not the original output interface,
1905 * and the interface is participating in spanning
1906 * tree, make sure the port is in a state that
1907 * allows forwarding.
1909 if (dst_if != ifp &&
1910 (bif->bif_flags & IFBIF_STP) != 0) {
1911 switch (bif->bif_state) {
1912 case BSTP_IFSTATE_BLOCKING:
1913 case BSTP_IFSTATE_LISTENING:
1914 case BSTP_IFSTATE_DISABLED:
1915 continue;
1919 if (LIST_NEXT(bif, bif_next) == NULL) {
1920 used = 1;
1921 mc = m;
1922 } else {
1923 mc = m_copypacket(m, MB_DONTWAIT);
1924 if (mc == NULL) {
1925 bifp->if_oerrors++;
1926 continue;
1929 bridge_handoff(dst_if, mc);
1931 if (nbif != NULL && !nbif->bif_onlist) {
1932 KKASSERT(bif->bif_onlist);
1933 nbif = LIST_NEXT(bif, bif_next);
1936 if (used == 0)
1937 m_freem(m);
1938 return (0);
1941 sendunicast:
1943 * XXX Spanning tree consideration here?
1945 if (sc->sc_span)
1946 bridge_span(sc, m);
1947 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1948 m_freem(m);
1949 else
1950 bridge_handoff(dst_if, m);
1951 return (0);
1955 * bridge_start:
1957 * Start output on a bridge.
1960 static void
1961 bridge_start(struct ifnet *ifp)
1963 struct bridge_softc *sc = ifp->if_softc;
1965 ASSERT_IFNET_SERIALIZED_TX(ifp);
1967 ifp->if_flags |= IFF_OACTIVE;
1968 for (;;) {
1969 struct ifnet *dst_if = NULL;
1970 struct ether_header *eh;
1971 struct mbuf *m;
1973 m = ifq_dequeue(&ifp->if_snd, NULL);
1974 if (m == NULL)
1975 break;
1977 if (m->m_len < sizeof(*eh)) {
1978 m = m_pullup(m, sizeof(*eh));
1979 if (m == NULL) {
1980 ifp->if_oerrors++;
1981 continue;
1984 eh = mtod(m, struct ether_header *);
1986 BPF_MTAP(ifp, m);
1987 ifp->if_opackets++;
1989 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0)
1990 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1992 if (dst_if == NULL)
1993 bridge_start_bcast(sc, m);
1994 else
1995 bridge_enqueue(dst_if, m);
1997 ifp->if_flags &= ~IFF_OACTIVE;
2001 * bridge_forward:
2003 * The forwarding function of the bridge.
2005 static void
2006 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
2008 struct bridge_iflist *bif;
2009 struct ifnet *src_if, *dst_if, *ifp;
2010 struct ether_header *eh;
2012 src_if = m->m_pkthdr.rcvif;
2013 ifp = sc->sc_ifp;
2015 ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2017 ifp->if_ipackets++;
2018 ifp->if_ibytes += m->m_pkthdr.len;
2021 * Look up the bridge_iflist.
2023 bif = bridge_lookup_member_if(sc, src_if);
2024 if (bif == NULL) {
2025 /* Interface is not a bridge member (anymore?) */
2026 m_freem(m);
2027 return;
2030 if (bif->bif_flags & IFBIF_STP) {
2031 switch (bif->bif_state) {
2032 case BSTP_IFSTATE_BLOCKING:
2033 case BSTP_IFSTATE_LISTENING:
2034 case BSTP_IFSTATE_DISABLED:
2035 m_freem(m);
2036 return;
2040 eh = mtod(m, struct ether_header *);
2043 * If the interface is learning, and the source
2044 * address is valid and not multicast, record
2045 * the address.
2047 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
2048 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
2049 (eh->ether_shost[0] == 0 &&
2050 eh->ether_shost[1] == 0 &&
2051 eh->ether_shost[2] == 0 &&
2052 eh->ether_shost[3] == 0 &&
2053 eh->ether_shost[4] == 0 &&
2054 eh->ether_shost[5] == 0) == 0)
2055 bridge_rtupdate(sc, eh->ether_shost, src_if, IFBAF_DYNAMIC);
2057 if ((bif->bif_flags & IFBIF_STP) != 0 &&
2058 bif->bif_state == BSTP_IFSTATE_LEARNING) {
2059 m_freem(m);
2060 return;
2064 * At this point, the port either doesn't participate
2065 * in spanning tree or it is in the forwarding state.
2069 * If the packet is unicast, destined for someone on
2070 * "this" side of the bridge, drop it.
2072 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2073 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2074 if (src_if == dst_if) {
2075 m_freem(m);
2076 return;
2078 } else {
2079 /* ...forward it to all interfaces. */
2080 ifp->if_imcasts++;
2081 dst_if = NULL;
2084 if (dst_if == NULL) {
2085 bridge_broadcast(sc, src_if, m);
2086 return;
2090 * At this point, we're dealing with a unicast frame
2091 * going to a different interface.
2093 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
2094 m_freem(m);
2095 return;
2097 bif = bridge_lookup_member_if(sc, dst_if);
2098 if (bif == NULL) {
2099 /* Not a member of the bridge (anymore?) */
2100 m_freem(m);
2101 return;
2104 if (bif->bif_flags & IFBIF_STP) {
2105 switch (bif->bif_state) {
2106 case BSTP_IFSTATE_DISABLED:
2107 case BSTP_IFSTATE_BLOCKING:
2108 m_freem(m);
2109 return;
2113 if (inet_pfil_hook.ph_hashooks > 0
2114 #ifdef INET6
2115 || inet6_pfil_hook.ph_hashooks > 0
2116 #endif
2118 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2119 return;
2120 if (m == NULL)
2121 return;
2123 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2124 return;
2125 if (m == NULL)
2126 return;
2128 bridge_handoff(dst_if, m);
2132 * bridge_input:
2134 * Receive input from a member interface. Queue the packet for
2135 * bridging if it is not for us.
2137 static struct mbuf *
2138 bridge_input(struct ifnet *ifp, struct mbuf *m)
2140 struct bridge_softc *sc = ifp->if_bridge;
2141 struct bridge_iflist *bif;
2142 struct ifnet *bifp, *new_ifp;
2143 struct ether_header *eh;
2144 struct mbuf *mc, *mc2;
2146 ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2149 * Make sure that we are still a member of a bridge interface.
2151 if (sc == NULL)
2152 return m;
2154 new_ifp = NULL;
2155 bifp = sc->sc_ifp;
2157 if ((bifp->if_flags & IFF_RUNNING) == 0)
2158 goto out;
2161 * Implement support for bridge monitoring. If this flag has been
2162 * set on this interface, discard the packet once we push it through
2163 * the bpf(4) machinery, but before we do, increment various counters
2164 * associated with this bridge.
2166 if (bifp->if_flags & IFF_MONITOR) {
2167 /* Change input interface to this bridge */
2168 m->m_pkthdr.rcvif = bifp;
2170 BPF_MTAP(bifp, m);
2172 /* Update bridge's ifnet statistics */
2173 bifp->if_ipackets++;
2174 bifp->if_ibytes += m->m_pkthdr.len;
2175 if (m->m_flags & (M_MCAST | M_BCAST))
2176 bifp->if_imcasts++;
2178 m_freem(m);
2179 m = NULL;
2180 goto out;
2183 eh = mtod(m, struct ether_header *);
2185 if (memcmp(eh->ether_dhost, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
2187 * If the packet is for us, set the packets source as the
2188 * bridge, and return the packet back to ifnet.if_input for
2189 * local processing.
2191 KASSERT(bifp->if_bridge == NULL,
2192 ("loop created in bridge_input"));
2193 new_ifp = bifp;
2194 goto out;
2198 * Tap all packets arriving on the bridge, whether they are
2199 * destined for us locally or not: input is input.
2201 BPF_MTAP(bifp, m);
2203 bif = bridge_lookup_member_if(sc, ifp);
2204 if (bif == NULL)
2205 goto out;
2207 if (sc->sc_span)
2208 bridge_span(sc, m);
2210 if (m->m_flags & (M_BCAST | M_MCAST)) {
2211 /* Tap off 802.1D packets; they do not get forwarded. */
2212 if (memcmp(eh->ether_dhost, bstp_etheraddr,
2213 ETHER_ADDR_LEN) == 0) {
2214 ifnet_serialize_all(bifp);
2215 bstp_input(sc, bif, m);
2216 ifnet_deserialize_all(bifp);
2218 /* m is freed by bstp_input */
2219 m = NULL;
2220 goto out;
2223 if (bif->bif_flags & IFBIF_STP) {
2224 switch (bif->bif_state) {
2225 case BSTP_IFSTATE_BLOCKING:
2226 case BSTP_IFSTATE_LISTENING:
2227 case BSTP_IFSTATE_DISABLED:
2228 goto out;
2233 * Make a deep copy of the packet and enqueue the copy
2234 * for bridge processing; return the original packet for
2235 * local processing.
2237 mc = m_dup(m, MB_DONTWAIT);
2238 if (mc == NULL)
2239 goto out;
2241 bridge_forward(sc, mc);
2244 * Reinject the mbuf as arriving on the bridge so we have a
2245 * chance at claiming multicast packets. We cannot loop back
2246 * here from ether_input as a bridge is never a member of a
2247 * bridge.
2249 KASSERT(bifp->if_bridge == NULL,
2250 ("loop created in bridge_input"));
2251 mc2 = m_dup(m, MB_DONTWAIT);
2252 #ifdef notyet
2253 if (mc2 != NULL) {
2254 /* Keep the layer3 header aligned */
2255 int i = min(mc2->m_pkthdr.len, max_protohdr);
2256 mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2258 #endif
2259 if (mc2 != NULL) {
2261 * Don't tap to bpf(4) again; we have
2262 * already done the tapping.
2264 ether_reinput_oncpu(bifp, mc2, 0);
2267 /* Return the original packet for local processing. */
2268 goto out;
2271 if (bif->bif_flags & IFBIF_STP) {
2272 switch (bif->bif_state) {
2273 case BSTP_IFSTATE_BLOCKING:
2274 case BSTP_IFSTATE_LISTENING:
2275 case BSTP_IFSTATE_DISABLED:
2276 goto out;
2281 * Unicast. Make sure it's not for us.
2283 * This loop is MPSAFE; the only blocking operation (bridge_rtupdate)
2284 * is followed by breaking out of the loop.
2286 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2287 if (bif->bif_ifp->if_type != IFT_ETHER)
2288 continue;
2290 /* It is destined for us. */
2291 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
2292 ETHER_ADDR_LEN) == 0) {
2293 if (bif->bif_ifp != ifp) {
2294 /* XXX loop prevention */
2295 m->m_flags |= M_ETHER_BRIDGED;
2296 new_ifp = bif->bif_ifp;
2298 if (bif->bif_flags & IFBIF_LEARNING) {
2299 bridge_rtupdate(sc, eh->ether_shost,
2300 ifp, IFBAF_DYNAMIC);
2302 goto out;
2305 /* We just received a packet that we sent out. */
2306 if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
2307 ETHER_ADDR_LEN) == 0) {
2308 m_freem(m);
2309 m = NULL;
2310 goto out;
2314 /* Perform the bridge forwarding function. */
2315 bridge_forward(sc, m);
2316 m = NULL;
2317 out:
2318 if (new_ifp != NULL) {
2319 ether_reinput_oncpu(new_ifp, m, 1);
2320 m = NULL;
2322 return (m);
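/*
 * bridge_input() return convention, as implemented above: returning a
 * non-NULL mbuf hands the frame back to the caller for local processing,
 * while returning NULL means the frame was consumed (forwarded, handed
 * to bstp_input(), reinjected via ether_reinput_oncpu(), or freed).  The
 * new_ifp handling at "out:" is the one place where the mbuf is both
 * reinjected and NULLed, so the caller never sees a frame that has
 * already been handed to another interface.
 */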
2326 * bridge_start_bcast:
2328 * Broadcast the packet sent from bridge to all member
2329 * interfaces.
2330 * This is a simplified version of bridge_broadcast(), however,
2331 * this function expects caller to hold bridge's serializer.
2333 static void
2334 bridge_start_bcast(struct bridge_softc *sc, struct mbuf *m)
2336 struct bridge_iflist *bif;
2337 struct mbuf *mc;
2338 struct ifnet *dst_if, *bifp;
2339 int used = 0;
2341 bifp = sc->sc_ifp;
2342 ASSERT_IFNET_SERIALIZED_ALL(bifp);
2345 * Following loop is MPSAFE; nothing is blocking
2346 * in the loop body.
2348 LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2349 dst_if = bif->bif_ifp;
2351 if (bif->bif_flags & IFBIF_STP) {
2352 switch (bif->bif_state) {
2353 case BSTP_IFSTATE_BLOCKING:
2354 case BSTP_IFSTATE_DISABLED:
2355 continue;
2359 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2360 (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2361 continue;
2363 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2364 continue;
2366 if (LIST_NEXT(bif, bif_next) == NULL) {
2367 mc = m;
2368 used = 1;
2369 } else {
2370 mc = m_copypacket(m, MB_DONTWAIT);
2371 if (mc == NULL) {
2372 bifp->if_oerrors++;
2373 continue;
2376 bridge_enqueue(dst_if, mc);
2378 if (used == 0)
2379 m_freem(m);
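/*
 * The "used" dance above is a small copy-avoidance optimization: every
 * eligible member except the last one in the per-cpu list transmits an
 * m_copypacket() clone, while the last member consumes the original
 * mbuf.  If no member qualified, the original is freed here.  A
 * hypothetical two-member bridge therefore costs one copy per
 * broadcast rather than two.
 */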
2383 * bridge_broadcast:
2385 * Send a frame to all interfaces that are members of
2386 * the bridge, except for the one on which the packet
2387 * arrived.
2389 static void
2390 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2391 struct mbuf *m)
2393 struct bridge_iflist *bif, *nbif;
2394 struct mbuf *mc;
2395 struct ifnet *dst_if, *bifp;
2396 int used = 0;
2398 bifp = sc->sc_ifp;
2399 ASSERT_IFNET_NOT_SERIALIZED_ALL(bifp);
2401 if (inet_pfil_hook.ph_hashooks > 0
2402 #ifdef INET6
2403 || inet6_pfil_hook.ph_hashooks > 0
2404 #endif
2406 if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0)
2407 return;
2408 if (m == NULL)
2409 return;
2411 /* Filter on the bridge interface before broadcasting */
2412 if (bridge_pfil(&m, bifp, NULL, PFIL_OUT) != 0)
2413 return;
2414 if (m == NULL)
2415 return;
2418 LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
2419 dst_if = bif->bif_ifp;
2420 if (dst_if == src_if)
2421 continue;
2423 if (bif->bif_flags & IFBIF_STP) {
2424 switch (bif->bif_state) {
2425 case BSTP_IFSTATE_BLOCKING:
2426 case BSTP_IFSTATE_DISABLED:
2427 continue;
2431 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2432 (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2433 continue;
2435 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2436 continue;
2438 if (LIST_NEXT(bif, bif_next) == NULL) {
2439 mc = m;
2440 used = 1;
2441 } else {
2442 mc = m_copypacket(m, MB_DONTWAIT);
2443 if (mc == NULL) {
2444 sc->sc_ifp->if_oerrors++;
2445 continue;
2450 * Filter on the output interface. Pass a NULL bridge
2451 * interface pointer so we do not redundantly filter on
2452 * the bridge for each interface we broadcast on.
2454 if (inet_pfil_hook.ph_hashooks > 0
2455 #ifdef INET6
2456 || inet6_pfil_hook.ph_hashooks > 0
2457 #endif
2459 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2460 continue;
2461 if (mc == NULL)
2462 continue;
2464 bridge_handoff(dst_if, mc);
2466 if (nbif != NULL && !nbif->bif_onlist) {
2467 KKASSERT(bif->bif_onlist);
2468 nbif = LIST_NEXT(bif, bif_next);
2471 if (used == 0)
2472 m_freem(m);
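/*
 * bridge_broadcast() walks the per-cpu member list with
 * LIST_FOREACH_MUTABLE because bridge_pfil()/bridge_handoff() may block,
 * so a member could be detached from this cpu's list in the meantime.
 * The bif_onlist recheck re-fetches the next element in that case, so
 * the iteration does not follow a stale pointer.  As in
 * bridge_start_bcast(), the last member gets the original mbuf and the
 * others get m_copypacket() clones.
 */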
2476 * bridge_span:
2478 * Duplicate a packet out one or more interfaces that are in span mode;
2479 * the original mbuf is left unmodified.
2481 static void
2482 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2484 struct bridge_iflist *bif;
2485 struct ifnet *dst_if, *bifp;
2486 struct mbuf *mc;
2488 bifp = sc->sc_ifp;
2489 ifnet_serialize_all(bifp);
2491 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2492 dst_if = bif->bif_ifp;
2494 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2495 continue;
2497 mc = m_copypacket(m, MB_DONTWAIT);
2498 if (mc == NULL) {
2499 sc->sc_ifp->if_oerrors++;
2500 continue;
2502 bridge_enqueue(dst_if, mc);
2505 ifnet_deserialize_all(bifp);
2508 static void
2509 bridge_rtmsg_sync_handler(struct netmsg *nmsg)
2511 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2514 static void
2515 bridge_rtmsg_sync(struct bridge_softc *sc)
2517 struct netmsg nmsg;
2519 ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2521 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
2522 0, bridge_rtmsg_sync_handler);
2523 ifnet_domsg(&nmsg.nm_lmsg, 0);
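/*
 * A minimal sketch of the per-cpu replication pattern used by
 * bridge_rtmsg_sync() and most of the routing-table operations below
 * (assuming the usual DragonFly ifnet message ring):
 *
 *	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, handler);
 *	ifnet_domsg(&nmsg.nm_lmsg, 0);		// start on cpu0 and wait
 *
 * and, inside the handler after the per-cpu work is done:
 *
 *	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
 *
 * which presumably passes the same message on to the next cpu and
 * replies to the sender once the last cpu has run, so the caller
 * returns only after every cpu has executed the handler.
 */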
2526 static __inline void
2527 bridge_rtinfo_update(struct bridge_rtinfo *bri, struct ifnet *dst_if,
2528 int setflags, uint8_t flags, uint32_t timeo)
2530 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2531 bri->bri_ifp != dst_if)
2532 bri->bri_ifp = dst_if;
2533 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2534 bri->bri_expire != time_second + timeo)
2535 bri->bri_expire = time_second + timeo;
2536 if (setflags)
2537 bri->bri_flags = flags;
2540 static int
2541 bridge_rtinstall_oncpu(struct bridge_softc *sc, const uint8_t *dst,
2542 struct ifnet *dst_if, int setflags, uint8_t flags,
2543 struct bridge_rtinfo **bri0)
2545 struct bridge_rtnode *brt;
2546 struct bridge_rtinfo *bri;
2548 if (mycpuid == 0) {
2549 brt = bridge_rtnode_lookup(sc, dst);
2550 if (brt != NULL) {
2552 * rtnode for 'dst' already exists. We inform the
2553 * caller about this by leaving bri0 as NULL. The
2554 * caller will terminate the installation upon getting
2555 * NULL bri0. However, we still need to update the
2556 * rtinfo.
2558 KKASSERT(*bri0 == NULL);
2560 /* Update rtinfo */
2561 bridge_rtinfo_update(brt->brt_info, dst_if, setflags,
2562 flags, sc->sc_brttimeout);
2563 return 0;
2567 * We only need to check brtcnt on CPU0: if the limit
2568 * would be exceeded, ENOSPC is returned.  The caller knows
2569 * this and will terminate the installation.
2571 if (sc->sc_brtcnt >= sc->sc_brtmax)
2572 return ENOSPC;
2574 KKASSERT(*bri0 == NULL);
2575 bri = kmalloc(sizeof(struct bridge_rtinfo), M_DEVBUF,
2576 M_WAITOK | M_ZERO);
2577 *bri0 = bri;
2579 /* Setup rtinfo */
2580 bri->bri_flags = IFBAF_DYNAMIC;
2581 bridge_rtinfo_update(bri, dst_if, setflags, flags,
2582 sc->sc_brttimeout);
2583 } else {
2584 bri = *bri0;
2585 KKASSERT(bri != NULL);
2588 brt = kmalloc(sizeof(struct bridge_rtnode), M_DEVBUF,
2589 M_WAITOK | M_ZERO);
2590 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2591 brt->brt_info = bri;
2593 bridge_rtnode_insert(sc, brt);
2594 return 0;
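/*
 * bridge_rtinstall_oncpu() runs on every cpu in turn, cpu0 first.  Only
 * cpu0 performs the duplicate lookup and the sc_brtmax limit check; the
 * shared bridge_rtinfo is also allocated there and handed to the other
 * cpus through *bri0, so each cpu's private bridge_rtnode ends up
 * pointing at the same rtinfo.  That is why a NULL *bri0 after the cpu0
 * pass means "entry already existed" rather than an error.
 */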
2597 static void
2598 bridge_rtinstall_handler(struct netmsg *nmsg)
2600 struct netmsg_brsaddr *brmsg = (struct netmsg_brsaddr *)nmsg;
2601 int error;
2603 error = bridge_rtinstall_oncpu(brmsg->br_softc,
2604 brmsg->br_dst, brmsg->br_dst_if,
2605 brmsg->br_setflags, brmsg->br_flags,
2606 &brmsg->br_rtinfo);
2607 if (error) {
2608 KKASSERT(mycpuid == 0 && brmsg->br_rtinfo == NULL);
2609 lwkt_replymsg(&nmsg->nm_lmsg, error);
2610 return;
2611 } else if (brmsg->br_rtinfo == NULL) {
2612 /* rtnode already exists for 'dst' */
2613 KKASSERT(mycpuid == 0);
2614 lwkt_replymsg(&nmsg->nm_lmsg, 0);
2615 return;
2617 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2621 * bridge_rtupdate:
2623 * Add/Update a bridge routing entry.
2625 static int
2626 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2627 struct ifnet *dst_if, uint8_t flags)
2629 struct bridge_rtnode *brt;
2632 * A route for this destination might already exist. If so,
2633 * update it; otherwise, create a new one.
2635 if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
2636 struct netmsg_brsaddr *brmsg;
2638 if (sc->sc_brtcnt >= sc->sc_brtmax)
2639 return ENOSPC;
2641 brmsg = kmalloc(sizeof(*brmsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
2642 if (brmsg == NULL)
2643 return ENOMEM;
2645 netmsg_init(&brmsg->br_nmsg, NULL, &netisr_afree_rport,
2646 0, bridge_rtinstall_handler);
2647 memcpy(brmsg->br_dst, dst, ETHER_ADDR_LEN);
2648 brmsg->br_dst_if = dst_if;
2649 brmsg->br_flags = flags;
2650 brmsg->br_setflags = 0;
2651 brmsg->br_softc = sc;
2652 brmsg->br_rtinfo = NULL;
2654 ifnet_sendmsg(&brmsg->br_nmsg.nm_lmsg, 0);
2655 return 0;
2657 bridge_rtinfo_update(brt->brt_info, dst_if, 0, flags,
2658 sc->sc_brttimeout);
2659 return 0;
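/*
 * bridge_rtupdate() is called from the data path, so the install is sent
 * asynchronously: the netmsg is allocated with M_NULLOK (an allocation
 * failure simply returns ENOMEM) and replies to netisr_afree_rport,
 * which presumably frees the message instead of waking a sender.  The
 * configuration path uses bridge_rtsaddr() below, the synchronous
 * variant of the same installation (br_setflags is 1 and ifnet_domsg()
 * waits for all cpus to finish).
 */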
2662 static int
2663 bridge_rtsaddr(struct bridge_softc *sc, const uint8_t *dst,
2664 struct ifnet *dst_if, uint8_t flags)
2666 struct netmsg_brsaddr brmsg;
2668 ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2670 netmsg_init(&brmsg.br_nmsg, NULL, &curthread->td_msgport,
2671 0, bridge_rtinstall_handler);
2672 memcpy(brmsg.br_dst, dst, ETHER_ADDR_LEN);
2673 brmsg.br_dst_if = dst_if;
2674 brmsg.br_flags = flags;
2675 brmsg.br_setflags = 1;
2676 brmsg.br_softc = sc;
2677 brmsg.br_rtinfo = NULL;
2679 return ifnet_domsg(&brmsg.br_nmsg.nm_lmsg, 0);
2683 * bridge_rtlookup:
2685 * Lookup the destination interface for an address.
2687 static struct ifnet *
2688 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2690 struct bridge_rtnode *brt;
2692 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2693 return NULL;
2694 return brt->brt_info->bri_ifp;
2697 static void
2698 bridge_rtreap_handler(struct netmsg *nmsg)
2700 struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2701 struct bridge_rtnode *brt, *nbrt;
2703 LIST_FOREACH_MUTABLE(brt, &sc->sc_rtlists[mycpuid], brt_list, nbrt) {
2704 if (brt->brt_info->bri_dead)
2705 bridge_rtnode_destroy(sc, brt);
2707 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2710 static void
2711 bridge_rtreap(struct bridge_softc *sc)
2713 struct netmsg nmsg;
2715 ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2717 netmsg_init(&nmsg, NULL, &curthread->td_msgport,
2718 0, bridge_rtreap_handler);
2719 nmsg.nm_lmsg.u.ms_resultp = sc;
2721 ifnet_domsg(&nmsg.nm_lmsg, 0);
2724 static void
2725 bridge_rtreap_async(struct bridge_softc *sc)
2727 struct netmsg *nmsg;
2729 nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
2731 netmsg_init(nmsg, NULL, &netisr_afree_rport,
2732 0, bridge_rtreap_handler);
2733 nmsg->nm_lmsg.u.ms_resultp = sc;
2735 ifnet_sendmsg(&nmsg->nm_lmsg, 0);
2739 * bridge_rttrim:
2741 * Trim the routing table so that we have a number
2742 * of routing entries less than or equal to the
2743 * maximum number.
2745 static void
2746 bridge_rttrim(struct bridge_softc *sc)
2748 struct bridge_rtnode *brt;
2749 int dead;
2751 ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2753 /* Make sure we actually need to do this. */
2754 if (sc->sc_brtcnt <= sc->sc_brtmax)
2755 return;
2758 * Find out how many rtnodes are dead
2760 dead = bridge_rtage_finddead(sc);
2761 KKASSERT(dead <= sc->sc_brtcnt);
2763 if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2764 /* Enough dead rtnodes are found */
2765 bridge_rtreap(sc);
2766 return;
2770 * Kill some dynamic rtnodes to meet the brtmax
2772 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2773 struct bridge_rtinfo *bri = brt->brt_info;
2775 if (bri->bri_dead) {
2777 * We have counted this rtnode in
2778 * bridge_rtage_finddead()
2780 continue;
2783 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2784 bri->bri_dead = 1;
2785 ++dead;
2786 KKASSERT(dead <= sc->sc_brtcnt);
2788 if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2789 /* Enough rtnodes are collected */
2790 break;
2794 if (dead)
2795 bridge_rtreap(sc);
2799 * bridge_timer:
2801 * Aging timer for the bridge.
2803 static void
2804 bridge_timer(void *arg)
2806 struct bridge_softc *sc = arg;
2807 struct lwkt_msg *lmsg;
2809 KKASSERT(mycpuid == BRIDGE_CFGCPU);
2811 crit_enter();
2813 if (callout_pending(&sc->sc_brcallout) ||
2814 !callout_active(&sc->sc_brcallout)) {
2815 crit_exit();
2816 return;
2818 callout_deactivate(&sc->sc_brcallout);
2820 lmsg = &sc->sc_brtimemsg.nm_lmsg;
2821 KKASSERT(lmsg->ms_flags & MSGF_DONE);
2822 lwkt_sendmsg(BRIDGE_CFGPORT, lmsg);
2824 crit_exit();
2827 static void
2828 bridge_timer_handler(struct netmsg *nmsg)
2830 struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2832 KKASSERT(&curthread->td_msgport == BRIDGE_CFGPORT);
2834 crit_enter();
2835 /* Reply ASAP */
2836 lwkt_replymsg(&nmsg->nm_lmsg, 0);
2837 crit_exit();
2839 bridge_rtage(sc);
2840 if (sc->sc_ifp->if_flags & IFF_RUNNING) {
2841 callout_reset(&sc->sc_brcallout,
2842 bridge_rtable_prune_period * hz, bridge_timer, sc);
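/*
 * Aging is driven by a callout/netmsg pair: bridge_timer() runs from the
 * callout on BRIDGE_CFGCPU and only sends sc_brtimemsg to
 * BRIDGE_CFGPORT; bridge_timer_handler() replies immediately (so the
 * message is marked done before the next tick) and then does the real
 * work, ageing the table and re-arming the callout while the interface
 * stays IFF_RUNNING.  Splitting it this way presumably keeps the callout
 * context itself from blocking.
 */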
2846 static int
2847 bridge_rtage_finddead(struct bridge_softc *sc)
2849 struct bridge_rtnode *brt;
2850 int dead = 0;
2852 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2853 struct bridge_rtinfo *bri = brt->brt_info;
2855 if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2856 time_second >= bri->bri_expire) {
2857 bri->bri_dead = 1;
2858 ++dead;
2859 KKASSERT(dead <= sc->sc_brtcnt);
2862 return dead;
2866 * bridge_rtage:
2868 * Perform an aging cycle.
2870 static void
2871 bridge_rtage(struct bridge_softc *sc)
2873 ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2875 if (bridge_rtage_finddead(sc))
2876 bridge_rtreap(sc);
2880 * bridge_rtflush:
2882 * Remove all dynamic addresses from the bridge.
2884 static void
2885 bridge_rtflush(struct bridge_softc *sc, int bf)
2887 struct bridge_rtnode *brt;
2888 int reap;
2890 reap = 0;
2891 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2892 struct bridge_rtinfo *bri = brt->brt_info;
2894 if ((bf & IFBF_FLUSHALL) ||
2895 (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2896 bri->bri_dead = 1;
2897 reap = 1;
2900 if (reap) {
2901 if (bf & IFBF_FLUSHSYNC)
2902 bridge_rtreap(sc);
2903 else
2904 bridge_rtreap_async(sc);
2909 * bridge_rtdaddr:
2911 * Remove an address from the table.
2913 static int
2914 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2916 struct bridge_rtnode *brt;
2918 ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2920 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2921 return (ENOENT);
2923 /* TODO: add a cheaper delete operation */
2924 brt->brt_info->bri_dead = 1;
2925 bridge_rtreap(sc);
2926 return (0);
2930 * bridge_rtdelete:
2932 * Delete routes to a specific member interface.
2934 void
2935 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int bf)
2937 struct bridge_rtnode *brt;
2938 int reap;
2940 reap = 0;
2941 LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2942 struct bridge_rtinfo *bri = brt->brt_info;
2944 if (bri->bri_ifp == ifp &&
2945 ((bf & IFBF_FLUSHALL) ||
2946 (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
2947 bri->bri_dead = 1;
2948 reap = 1;
2951 if (reap) {
2952 if (bf & IFBF_FLUSHSYNC)
2953 bridge_rtreap(sc);
2954 else
2955 bridge_rtreap_async(sc);
2960 * bridge_rtable_init:
2962 * Initialize the route table for this bridge.
2964 static void
2965 bridge_rtable_init(struct bridge_softc *sc)
2967 int cpu;
2970 * Initialize per-cpu hash tables
2972 sc->sc_rthashs = kmalloc(sizeof(*sc->sc_rthashs) * ncpus,
2973 M_DEVBUF, M_WAITOK);
2974 for (cpu = 0; cpu < ncpus; ++cpu) {
2975 int i;
2977 sc->sc_rthashs[cpu] =
2978 kmalloc(sizeof(struct bridge_rtnode_head) * BRIDGE_RTHASH_SIZE,
2979 M_DEVBUF, M_WAITOK);
2981 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2982 LIST_INIT(&sc->sc_rthashs[cpu][i]);
2984 sc->sc_rthash_key = karc4random();
2987 * Initialize per-cpu lists
2989 sc->sc_rtlists = kmalloc(sizeof(struct bridge_rtnode_head) * ncpus,
2990 M_DEVBUF, M_WAITOK);
2991 for (cpu = 0; cpu < ncpus; ++cpu)
2992 LIST_INIT(&sc->sc_rtlists[cpu]);
2996 * bridge_rtable_fini:
2998 * Deconstruct the route table for this bridge.
3000 static void
3001 bridge_rtable_fini(struct bridge_softc *sc)
3003 int cpu;
3006 * Free per-cpu hash tables
3008 for (cpu = 0; cpu < ncpus; ++cpu)
3009 kfree(sc->sc_rthashs[cpu], M_DEVBUF);
3010 kfree(sc->sc_rthashs, M_DEVBUF);
3013 * Free per-cpu lists
3015 kfree(sc->sc_rtlists, M_DEVBUF);
3019 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3020 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3022 #define mix(a, b, c) \
3023 do { \
3024 a -= b; a -= c; a ^= (c >> 13); \
3025 b -= c; b -= a; b ^= (a << 8); \
3026 c -= a; c -= b; c ^= (b >> 13); \
3027 a -= b; a -= c; a ^= (c >> 12); \
3028 b -= c; b -= a; b ^= (a << 16); \
3029 c -= a; c -= b; c ^= (b >> 5); \
3030 a -= b; a -= c; a ^= (c >> 3); \
3031 b -= c; b -= a; b ^= (a << 10); \
3032 c -= a; c -= b; c ^= (b >> 15); \
3033 } while (/*CONSTCOND*/0)
3035 static __inline uint32_t
3036 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3038 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3040 b += addr[5] << 8;
3041 b += addr[4];
3042 a += addr[3] << 24;
3043 a += addr[2] << 16;
3044 a += addr[1] << 8;
3045 a += addr[0];
3047 mix(a, b, c);
3049 return (c & BRIDGE_RTHASH_MASK);
3052 #undef mix
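/*
 * bridge_rthash() folds the six address bytes into the a/b words of the
 * Jenkins mix above, seeds c with the per-bridge random sc_rthash_key
 * (picked in bridge_rtable_init()), and masks the result down to
 * BRIDGE_RTHASH_MASK.  The random key is presumably there so that remote
 * hosts cannot deliberately aim many MAC addresses at a single hash
 * chain.
 */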
3054 static int
3055 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3057 int i, d;
3059 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3060 d = ((int)a[i]) - ((int)b[i]);
3063 return (d);
3067 * bridge_rtnode_lookup:
3069 * Look up a bridge route node for the specified destination.
3071 static struct bridge_rtnode *
3072 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
3074 struct bridge_rtnode *brt;
3075 uint32_t hash;
3076 int dir;
3078 hash = bridge_rthash(sc, addr);
3079 LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash) {
3080 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3081 if (dir == 0)
3082 return (brt);
3083 if (dir > 0)
3084 return (NULL);
3087 return (NULL);
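/*
 * The hash chains are kept sorted by address, larger addresses first
 * (see bridge_rtnode_insert() below).  That ordering is what allows the
 * early return above: once the compare goes positive the address cannot
 * appear later in the chain, so a miss only costs part of the chain on
 * average.
 */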
3091 * bridge_rtnode_insert:
3093 * Insert the specified bridge node into the route table.
3094 * Caller has to make sure that rtnode does not exist.
3096 static void
3097 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3099 struct bridge_rtnode *lbrt;
3100 uint32_t hash;
3101 int dir;
3103 hash = bridge_rthash(sc, brt->brt_addr);
3105 lbrt = LIST_FIRST(&sc->sc_rthashs[mycpuid][hash]);
3106 if (lbrt == NULL) {
3107 LIST_INSERT_HEAD(&sc->sc_rthashs[mycpuid][hash], brt, brt_hash);
3108 goto out;
3111 do {
3112 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3113 KASSERT(dir != 0, ("rtnode already exists\n"));
3115 if (dir > 0) {
3116 LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3117 goto out;
3119 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
3120 LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3121 goto out;
3123 lbrt = LIST_NEXT(lbrt, brt_hash);
3124 } while (lbrt != NULL);
3126 panic("no suitable position found for rtnode\n");
3127 out:
3128 LIST_INSERT_HEAD(&sc->sc_rtlists[mycpuid], brt, brt_list);
3129 if (mycpuid == 0) {
3131 * Update the brtcnt.
3132 * We only need to do it once and we do it on CPU0.
3134 sc->sc_brtcnt++;
3139 * bridge_rtnode_destroy:
3141 * Destroy a bridge rtnode.
3143 static void
3144 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3146 LIST_REMOVE(brt, brt_hash);
3147 LIST_REMOVE(brt, brt_list);
3149 if (mycpuid + 1 == ncpus) {
3150 /* Free rtinfo associated with rtnode on the last cpu */
3151 kfree(brt->brt_info, M_DEVBUF);
3153 kfree(brt, M_DEVBUF);
3155 if (mycpuid == 0) {
3156 /* Update brtcnt only on CPU0 */
3157 sc->sc_brtcnt--;
3161 static __inline int
3162 bridge_post_pfil(struct mbuf *m)
3164 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED)
3165 return EOPNOTSUPP;
3167 /* Not yet */
3168 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED)
3169 return EOPNOTSUPP;
3171 return 0;
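/*
 * bridge_post_pfil() rejects frames that a filter has tagged for
 * ipfw-style forwarding or dummynet processing; neither appears to be
 * handled on the bridged path here (note the "Not yet"), so returning
 * EOPNOTSUPP makes bridge_pfil() drop the frame instead of
 * mis-delivering it.
 */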
3175 * Send bridge packets through pfil if they are one of the types pfil can deal
3176 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
3177 * question.)  If *bifp or *ifp is NULL, packet filtering is skipped for
3178 * that interface.
3180 static int
3181 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3183 int snap, error, i, hlen;
3184 struct ether_header *eh1, eh2;
3185 struct ip *ip;
3186 struct llc llc1;
3187 u_int16_t ether_type;
3189 snap = 0;
3190 error = -1; /* Default error if not error == 0 */
3192 if (pfil_bridge == 0 && pfil_member == 0)
3193 return (0); /* filtering is disabled */
3195 i = min((*mp)->m_pkthdr.len, max_protohdr);
3196 if ((*mp)->m_len < i) {
3197 *mp = m_pullup(*mp, i);
3198 if (*mp == NULL) {
3199 kprintf("%s: m_pullup failed\n", __func__);
3200 return (-1);
3204 eh1 = mtod(*mp, struct ether_header *);
3205 ether_type = ntohs(eh1->ether_type);
3208 * Check for SNAP/LLC.
3210 if (ether_type < ETHERMTU) {
3211 struct llc *llc2 = (struct llc *)(eh1 + 1);
3213 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3214 llc2->llc_dsap == LLC_SNAP_LSAP &&
3215 llc2->llc_ssap == LLC_SNAP_LSAP &&
3216 llc2->llc_control == LLC_UI) {
3217 ether_type = htons(llc2->llc_un.type_snap.ether_type);
3218 snap = 1;
3223 * If we're trying to filter bridge traffic, don't look at anything
3224 * other than IP and ARP traffic. If the filter doesn't understand
3225 * IPv6, don't allow IPv6 through the bridge either. This is lame
3226 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3227 * but of course we don't have an AppleTalk filter to begin with.
3228 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3229 * ARP traffic.)
3231 switch (ether_type) {
3232 case ETHERTYPE_ARP:
3233 case ETHERTYPE_REVARP:
3234 return (0); /* Automatically pass */
3236 case ETHERTYPE_IP:
3237 #ifdef INET6
3238 case ETHERTYPE_IPV6:
3239 #endif /* INET6 */
3240 break;
3242 default:
3244 * Check to see if the user wants to pass non-IP
3245 * packets; these will not be checked by pfil(9)
3246 * and would be passed unconditionally, so the default is to drop.
3248 if (pfil_onlyip)
3249 goto bad;
3252 /* Strip off the Ethernet header and keep a copy. */
3253 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3254 m_adj(*mp, ETHER_HDR_LEN);
3256 /* Strip off snap header, if present */
3257 if (snap) {
3258 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3259 m_adj(*mp, sizeof(struct llc));
3263 * Check the IP header for alignment and errors
3265 if (dir == PFIL_IN) {
3266 switch (ether_type) {
3267 case ETHERTYPE_IP:
3268 error = bridge_ip_checkbasic(mp);
3269 break;
3270 #ifdef INET6
3271 case ETHERTYPE_IPV6:
3272 error = bridge_ip6_checkbasic(mp);
3273 break;
3274 #endif /* INET6 */
3275 default:
3276 error = 0;
3278 if (error)
3279 goto bad;
3282 error = 0;
3285 * Run the packet through pfil
3287 switch (ether_type) {
3288 case ETHERTYPE_IP:
3290 * Before calling the firewall, swap fields the same way
3291 * ip_input() does.  Here we assume the header is contiguous.
3293 ip = mtod(*mp, struct ip *);
3295 ip->ip_len = ntohs(ip->ip_len);
3296 ip->ip_off = ntohs(ip->ip_off);
3299 * Run pfil on the member interface and the bridge, both can
3300 * be skipped by clearing pfil_member or pfil_bridge.
3302 * Keep the order:
3303 * in_if -> bridge_if -> out_if
3305 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
3306 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3307 if (*mp == NULL || error != 0) /* filter may consume */
3308 break;
3309 error = bridge_post_pfil(*mp);
3310 if (error)
3311 break;
3314 if (pfil_member && ifp != NULL) {
3315 error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir);
3316 if (*mp == NULL || error != 0) /* filter may consume */
3317 break;
3318 error = bridge_post_pfil(*mp);
3319 if (error)
3320 break;
3323 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
3324 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3325 if (*mp == NULL || error != 0) /* filter may consume */
3326 break;
3327 error = bridge_post_pfil(*mp);
3328 if (error)
3329 break;
3332 /* check if we need to fragment the packet */
3333 if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3334 i = (*mp)->m_pkthdr.len;
3335 if (i > ifp->if_mtu) {
3336 error = bridge_fragment(ifp, *mp, &eh2, snap,
3337 &llc1);
3338 return (error);
3342 /* Recalculate the ip checksum and restore byte ordering */
3343 ip = mtod(*mp, struct ip *);
3344 hlen = ip->ip_hl << 2;
3345 if (hlen < sizeof(struct ip))
3346 goto bad;
3347 if (hlen > (*mp)->m_len) {
3348 if ((*mp = m_pullup(*mp, hlen)) == 0)
3349 goto bad;
3350 ip = mtod(*mp, struct ip *);
3351 if (ip == NULL)
3352 goto bad;
3354 ip->ip_len = htons(ip->ip_len);
3355 ip->ip_off = htons(ip->ip_off);
3356 ip->ip_sum = 0;
3357 if (hlen == sizeof(struct ip))
3358 ip->ip_sum = in_cksum_hdr(ip);
3359 else
3360 ip->ip_sum = in_cksum(*mp, hlen);
3362 break;
3363 #ifdef INET6
3364 case ETHERTYPE_IPV6:
3365 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3366 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3367 dir);
3369 if (*mp == NULL || error != 0) /* filter may consume */
3370 break;
3372 if (pfil_member && ifp != NULL)
3373 error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3374 dir);
3376 if (*mp == NULL || error != 0) /* filter may consume */
3377 break;
3379 if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3380 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3381 dir);
3382 break;
3383 #endif
3384 default:
3385 error = 0;
3386 break;
3389 if (*mp == NULL)
3390 return (error);
3391 if (error != 0)
3392 goto bad;
3394 error = -1;
3397 * Finally, put everything back the way it was and return
3399 if (snap) {
3400 M_PREPEND(*mp, sizeof(struct llc), MB_DONTWAIT);
3401 if (*mp == NULL)
3402 return (error);
3403 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3406 M_PREPEND(*mp, ETHER_HDR_LEN, MB_DONTWAIT);
3407 if (*mp == NULL)
3408 return (error);
3409 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3411 return (0);
3413 bad:
3414 m_freem(*mp);
3415 *mp = NULL;
3416 return (error);
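/*
 * Rough shape of bridge_pfil(), for reference: pull up the headers, peel
 * off the Ethernet (and optional SNAP/LLC) header while keeping copies,
 * sanity-check the IP/IPv6 header on input, then run the pfil hooks in
 * in_if -> bridge_if -> out_if order (the member and bridge passes can
 * each be disabled via pfil_member/pfil_bridge).  IPv4 output that no
 * longer fits the member MTU is handed to bridge_fragment(); otherwise
 * the saved SNAP and Ethernet headers are prepended again before the
 * (possibly rewritten) mbuf is returned to the caller.
 */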
3420 * Perform basic checks on the header size, since
3421 * pfil assumes ip_input has already performed them.
3422 * Cut-and-pasted from ip_input.c.
3423 * Given how simple the IPv6 version is,
3424 * does the IPv4 version really need to be
3425 * this complicated?
3427 * XXX Should we update ipstat here, or not?
3428 * XXX Right now we update ipstat but not
3429 * XXX csum_counter.
3431 static int
3432 bridge_ip_checkbasic(struct mbuf **mp)
3434 struct mbuf *m = *mp;
3435 struct ip *ip;
3436 int len, hlen;
3437 u_short sum;
3439 if (*mp == NULL)
3440 return (-1);
3441 #if notyet
3442 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3443 if ((m = m_copyup(m, sizeof(struct ip),
3444 (max_linkhdr + 3) & ~3)) == NULL) {
3445 /* XXXJRT new stat, please */
3446 ipstat.ips_toosmall++;
3447 goto bad;
3449 } else
3450 #endif
3451 #ifndef __predict_false
3452 #define __predict_false(x) x
3453 #endif
3454 if (__predict_false(m->m_len < sizeof (struct ip))) {
3455 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3456 ipstat.ips_toosmall++;
3457 goto bad;
3460 ip = mtod(m, struct ip *);
3461 if (ip == NULL) goto bad;
3463 if (ip->ip_v != IPVERSION) {
3464 ipstat.ips_badvers++;
3465 goto bad;
3467 hlen = ip->ip_hl << 2;
3468 if (hlen < sizeof(struct ip)) { /* minimum header length */
3469 ipstat.ips_badhlen++;
3470 goto bad;
3472 if (hlen > m->m_len) {
3473 if ((m = m_pullup(m, hlen)) == 0) {
3474 ipstat.ips_badhlen++;
3475 goto bad;
3477 ip = mtod(m, struct ip *);
3478 if (ip == NULL) goto bad;
3481 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3482 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3483 } else {
3484 if (hlen == sizeof(struct ip)) {
3485 sum = in_cksum_hdr(ip);
3486 } else {
3487 sum = in_cksum(m, hlen);
3490 if (sum) {
3491 ipstat.ips_badsum++;
3492 goto bad;
3495 /* Retrieve the packet length. */
3496 len = ntohs(ip->ip_len);
3499 * Check for additional length bogosity
3501 if (len < hlen) {
3502 ipstat.ips_badlen++;
3503 goto bad;
3507 * Check that the amount of data in the buffers
3508 * is at least as much as the IP header would have us expect.
3509 * Drop packet if shorter than we expect.
3511 if (m->m_pkthdr.len < len) {
3512 ipstat.ips_tooshort++;
3513 goto bad;
3516 /* Checks out, proceed */
3517 *mp = m;
3518 return (0);
3520 bad:
3521 *mp = m;
3522 return (-1);
3525 #ifdef INET6
3527 * Same as above, but for IPv6.
3528 * Cut-and-pasted from ip6_input.c.
3529 * XXX Should we update ip6stat, or not?
3531 static int
3532 bridge_ip6_checkbasic(struct mbuf **mp)
3534 struct mbuf *m = *mp;
3535 struct ip6_hdr *ip6;
3538 * If the IPv6 header is not aligned, slurp it up into a new
3539 * mbuf with space for link headers, in the event we forward
3540 * it. Otherwise, if it is aligned, make sure the entire base
3541 * IPv6 header is in the first mbuf of the chain.
3543 #if notyet
3544 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3545 struct ifnet *inifp = m->m_pkthdr.rcvif;
3546 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3547 (max_linkhdr + 3) & ~3)) == NULL) {
3548 /* XXXJRT new stat, please */
3549 ip6stat.ip6s_toosmall++;
3550 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3551 goto bad;
3553 } else
3554 #endif
3555 if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3556 struct ifnet *inifp = m->m_pkthdr.rcvif;
3557 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3558 ip6stat.ip6s_toosmall++;
3559 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3560 goto bad;
3564 ip6 = mtod(m, struct ip6_hdr *);
3566 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3567 ip6stat.ip6s_badvers++;
3568 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3569 goto bad;
3572 /* Checks out, proceed */
3573 *mp = m;
3574 return (0);
3576 bad:
3577 *mp = m;
3578 return (-1);
3580 #endif /* INET6 */
3583 * bridge_fragment:
3585 * Return a fragmented mbuf chain.
3587 static int
3588 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3589 int snap, struct llc *llc)
3591 struct mbuf *m0;
3592 struct ip *ip;
3593 int error = -1;
3595 if (m->m_len < sizeof(struct ip) &&
3596 (m = m_pullup(m, sizeof(struct ip))) == NULL)
3597 goto out;
3598 ip = mtod(m, struct ip *);
3600 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3601 CSUM_DELAY_IP);
3602 if (error)
3603 goto out;
3605 /* walk the chain and re-add the Ethernet header */
3606 for (m0 = m; m0; m0 = m0->m_nextpkt) {
3607 if (error == 0) {
3608 if (snap) {
3609 M_PREPEND(m0, sizeof(struct llc), MB_DONTWAIT);
3610 if (m0 == NULL) {
3611 error = ENOBUFS;
3612 continue;
3614 bcopy(llc, mtod(m0, caddr_t),
3615 sizeof(struct llc));
3617 M_PREPEND(m0, ETHER_HDR_LEN, MB_DONTWAIT);
3618 if (m0 == NULL) {
3619 error = ENOBUFS;
3620 continue;
3622 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3623 } else
3624 m_freem(m);
3627 if (error == 0)
3628 ipstat.ips_fragmented++;
3630 return (error);
3632 out:
3633 if (m != NULL)
3634 m_freem(m);
3635 return (error);
3638 static void
3639 bridge_enqueue_handler(struct netmsg *nmsg)
3641 struct netmsg_packet *nmp;
3642 struct ifnet *dst_ifp;
3643 struct mbuf *m;
3645 nmp = (struct netmsg_packet *)nmsg;
3646 m = nmp->nm_packet;
3647 dst_ifp = nmp->nm_netmsg.nm_lmsg.u.ms_resultp;
3649 bridge_handoff(dst_ifp, m);
3652 static void
3653 bridge_handoff(struct ifnet *dst_ifp, struct mbuf *m)
3655 struct mbuf *m0;
3657 /* We may be sending a fragment so traverse the mbuf */
3658 for (; m; m = m0) {
3659 struct altq_pktattr pktattr;
3661 m0 = m->m_nextpkt;
3662 m->m_nextpkt = NULL;
3664 if (ifq_is_enabled(&dst_ifp->if_snd))
3665 altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
3667 ifq_dispatch(dst_ifp, m, &pktattr);
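/*
 * bridge_handoff() may be given a chain of fragments produced by
 * bridge_fragment(), so it walks m_nextpkt, unlinking each packet
 * before classifying it for ALTQ (when the destination's send queue has
 * ALTQ enabled) and dispatching it via ifq_dispatch().
 */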
3671 static void
3672 bridge_control_dispatch(struct netmsg *nmsg)
3674 struct netmsg_brctl *bc_msg = (struct netmsg_brctl *)nmsg;
3675 struct ifnet *bifp = bc_msg->bc_sc->sc_ifp;
3676 int error;
3678 ifnet_serialize_all(bifp);
3679 error = bc_msg->bc_func(bc_msg->bc_sc, bc_msg->bc_arg);
3680 ifnet_deserialize_all(bifp);
3682 lwkt_replymsg(&nmsg->nm_lmsg, error);
3685 static int
3686 bridge_control(struct bridge_softc *sc, u_long cmd,
3687 bridge_ctl_t bc_func, void *bc_arg)
3689 struct ifnet *bifp = sc->sc_ifp;
3690 struct netmsg_brctl bc_msg;
3691 struct netmsg *nmsg;
3692 int error;
3694 ASSERT_IFNET_SERIALIZED_ALL(bifp);
3696 bzero(&bc_msg, sizeof(bc_msg));
3697 nmsg = &bc_msg.bc_nmsg;
3699 netmsg_init(nmsg, NULL, &curthread->td_msgport,
3700 0, bridge_control_dispatch);
3701 bc_msg.bc_func = bc_func;
3702 bc_msg.bc_sc = sc;
3703 bc_msg.bc_arg = bc_arg;
3705 ifnet_deserialize_all(bifp);
3706 error = lwkt_domsg(BRIDGE_CFGPORT, &nmsg->nm_lmsg, 0);
3707 ifnet_serialize_all(bifp);
3708 return error;
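/*
 * bridge_control() funnels every configuration change through a single
 * message to BRIDGE_CFGPORT.  The bridge is deserialized around
 * lwkt_domsg() and reserialized afterwards, presumably because the
 * dispatch handler itself takes ifnet_serialize_all() and waiting with
 * the serializer held could deadlock against it.
 */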
3711 static void
3712 bridge_add_bif_handler(struct netmsg *nmsg)
3714 struct netmsg_braddbif *amsg = (struct netmsg_braddbif *)nmsg;
3715 struct bridge_softc *sc;
3716 struct bridge_iflist *bif;
3718 sc = amsg->br_softc;
3720 bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
3721 bif->bif_ifp = amsg->br_bif_ifp;
3722 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
3723 bif->bif_onlist = 1;
3724 bif->bif_info = amsg->br_bif_info;
3726 LIST_INSERT_HEAD(&sc->sc_iflists[mycpuid], bif, bif_next);
3728 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3731 static void
3732 bridge_add_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3733 struct ifnet *ifp)
3735 struct netmsg_braddbif amsg;
3737 ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3739 netmsg_init(&amsg.br_nmsg, NULL, &curthread->td_msgport,
3740 0, bridge_add_bif_handler);
3741 amsg.br_softc = sc;
3742 amsg.br_bif_info = bif_info;
3743 amsg.br_bif_ifp = ifp;
3745 ifnet_domsg(&amsg.br_nmsg.nm_lmsg, 0);
3748 static void
3749 bridge_del_bif_handler(struct netmsg *nmsg)
3751 struct netmsg_brdelbif *dmsg = (struct netmsg_brdelbif *)nmsg;
3752 struct bridge_softc *sc;
3753 struct bridge_iflist *bif;
3755 sc = dmsg->br_softc;
3758 * Locate the bif associated with the br_bif_info
3759 * on the current CPU
3761 bif = bridge_lookup_member_ifinfo(sc, dmsg->br_bif_info);
3762 KKASSERT(bif != NULL && bif->bif_onlist);
3764 /* Remove the bif from the current CPU's iflist */
3765 bif->bif_onlist = 0;
3766 LIST_REMOVE(bif, bif_next);
3768 /* Save the removed bif for later freeing */
3769 LIST_INSERT_HEAD(dmsg->br_bif_list, bif, bif_next);
3771 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3774 static void
3775 bridge_del_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3776 struct bridge_iflist_head *saved_bifs)
3778 struct netmsg_brdelbif dmsg;
3780 ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3782 netmsg_init(&dmsg.br_nmsg, NULL, &curthread->td_msgport,
3783 0, bridge_del_bif_handler);
3784 dmsg.br_softc = sc;
3785 dmsg.br_bif_info = bif_info;
3786 dmsg.br_bif_list = saved_bifs;
3788 ifnet_domsg(&dmsg.br_nmsg.nm_lmsg, 0);
3791 static void
3792 bridge_set_bifflags_handler(struct netmsg *nmsg)
3794 struct netmsg_brsflags *smsg = (struct netmsg_brsflags *)nmsg;
3795 struct bridge_softc *sc;
3796 struct bridge_iflist *bif;
3798 sc = smsg->br_softc;
3801 * Locate the bif associated with the br_bif_info
3802 * on the current CPU
3804 bif = bridge_lookup_member_ifinfo(sc, smsg->br_bif_info);
3805 KKASSERT(bif != NULL && bif->bif_onlist);
3807 bif->bif_flags = smsg->br_bif_flags;
3809 ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3812 static void
3813 bridge_set_bifflags(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3814 uint32_t bif_flags)
3816 struct netmsg_brsflags smsg;
3818 ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3820 netmsg_init(&smsg.br_nmsg, NULL, &curthread->td_msgport,
3821 0, bridge_set_bifflags_handler);
3822 smsg.br_softc = sc;
3823 smsg.br_bif_info = bif_info;
3824 smsg.br_bif_flags = bif_flags;
3826 ifnet_domsg(&smsg.br_nmsg.nm_lmsg, 0);