/*
 *	Handle firewalling
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *	Bart De Schuymer		<bdschuym@pandora.be>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>

#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#define skb_origaddr(skb)	 (((struct bridge_skb_cb *) \
				 (skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb)	 (skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb)	 (skb_origaddr(skb) != ip_hdr(skb)->daddr)
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#endif
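/*
 * Note: with CONFIG_SYSCTL the knobs above are exposed through the sysctl
 * table registered at the bottom of this file, i.e. under
 * /proc/sys/net/bridge/.  A minimal usage sketch (paths assumed from the
 * brnf_path/brnf_table definitions below):
 *
 *	echo 0 > /proc/sys/net/bridge/bridge-nf-call-iptables
 *	echo 1 > /proc/sys/net/bridge/bridge-nf-filter-vlan-tagged
 *
 * Without sysctl support the values are fixed at compile time.
 */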
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (vlan_tx_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}
#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) && \
	 brnf_filter_vlan_tagged)
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}
#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)
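/*
 * The bridge attaches a fake dst/route to bridged skbs only to satisfy
 * netfilter code that expects one; PMTU updates on that fake entry are
 * deliberately ignored, hence the empty update_pmtu callback below.
 */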
static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops fake_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.update_pmtu =		fake_update_pmtu,
};
/*
 * Initialize bogus route table used to keep netfilter happy.
 * Currently, we fill in the PMTU entry because netfilter
 * refragmentation needs it, and the rt_flags entry because
 * ipt_REJECT needs it.  Future netfilter modules might
 * require us to fill additional fields.
 */
static const u32 br_dst_default_metrics[RTAX_MAX] = {
	[RTAX_MTU - 1] = 1500,
};
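/*
 * The 1500-byte default corresponds to the standard Ethernet MTU;
 * br_netfilter_rtable_init() below installs these metrics on each
 * bridge's fake_rtable.
 */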
void br_netfilter_rtable_init(struct net_bridge *br)
{
	struct rtable *rt = &br->fake_rtable;

	atomic_set(&rt->dst.__refcnt, 1);
	rt->dst.dev = br->dev;
	rt->dst.path = &rt->dst;
	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
	rt->dst.flags = DST_NOXFRM;
	rt->dst.ops = &fake_dst_ops;
}
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? &port->br->fake_rtable : NULL;
}
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
	if (likely(skb->nf_bridge))
		atomic_set(&(skb->nf_bridge->use), 1);

	return skb->nf_bridge;
}
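/*
 * Give the skb its own nf_bridge_info if the current one is shared
 * (use count > 1), so the caller can modify it without affecting clones.
 */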
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (atomic_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			atomic_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}

	return nf_bridge;
}
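/*
 * The helpers below push or pull the 802.1Q (4 bytes) or PPPoE session
 * (8 bytes) encapsulation header, as reported by
 * nf_bridge_encap_header_len(), so that the IP/IPv6/ARP code sees the
 * packet at the expected offset.
 */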
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_push(skb, len);
	skb->network_header -= len;
}
static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}
static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}
static inline void nf_bridge_save_header(struct sk_buff *skb)
{
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	skb_copy_from_linear_data_offset(skb, -header_size,
					 skb->nf_bridge->data, header_size);
}
static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
	if (skb->nf_bridge->mask & BRNF_8021Q)
		skb->protocol = htons(ETH_P_8021Q);
	else if (skb->nf_bridge->mask & BRNF_PPPoE)
		skb->protocol = htons(ETH_P_PPP_SES);
}
/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */
static int br_parse_ip_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	const struct iphdr *iph;
	struct net_device *dev = skb->dev;
	u32 len;

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto inhdr_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	if (iph->ihl == 5)
		return 0;

	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
	if (ip_options_compile(dev_net(dev), opt, skb))
		goto inhdr_error;

	/* Check correct handling of SRR option */
	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
			goto drop;

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return 0;

inhdr_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}
/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	unsigned int header_size;

	nf_bridge_update_protocol(skb);
	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data,
				       header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
 * bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

	rt = bridge_parent_rtable(nf_bridge->physindev);
	if (!rt) {
		kfree_skb(skb);
		return 0;
	}
	skb_dst_set_noref(skb, &rt->dst);

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}
/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	if (dst->hh) {
		neigh_hh_bridge(dst->hh, skb);
		skb->dev = nf_bridge->physindev;
		return br_handle_frame_finish(skb);
	} else if (dst->neighbour) {
		/* the neighbour function below overwrites the complete
		 * MAC header, so we save the Ethernet source address and
		 * protocol number. */
		skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
						 skb->nf_bridge->data,
						 ETH_HLEN-ETH_ALEN);
		/* tell br_dev_xmit to continue with forwarding */
		nf_bridge->mask |= BRNF_BRIDGED_DNAT;
		return dst->neighbour->output(skb);
	}
free_skb:
	kfree_skb(skb);
	return 0;
}
/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;
	int err;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(dev_net(dev), iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}
/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
	nf_bridge->physindev = skb->dev;
	skb->dev = bridge_parent(skb->dev);
	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->mask |= BRNF_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->mask |= BRNF_PPPoE;

	return skb->dev;
}
/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
	u32 pkt_len;
	const unsigned char *nh = skb_network_header(skb);
	int off = raw - nh;
	int len = (raw[1] + 1) << 3;

	if ((raw + len) - skb->data > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;

		switch (nh[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		case IPV6_TLV_JUMBO:
			if (nh[off + 1] != 4 || (off & 3) != 2)
				goto bad;
			pkt_len = ntohl(*(__be32 *) (nh + off + 2));
			if (pkt_len <= IPV6_MAXPLEN ||
			    ipv6_hdr(skb)->payload_len)
				goto bad;
			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
				goto bad;
			if (pskb_trim_rcsum(skb,
					    pkt_len + sizeof(struct ipv6hdr)))
				goto bad;
			nh = skb_network_header(skb);
			break;
		default:
			if (optlen > len)
				goto bad;
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 0;
bad:
	return -1;
}
/* Replicate the checks that IPv6 does on packet reception and pass the packet
 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
					   struct sk_buff *skb,
					   const struct net_device *in,
					   const struct net_device *out,
					   int (*okfn)(struct sk_buff *))
{
	const struct ipv6hdr *hdr;
	u32 pkt_len;

	if (skb->len < sizeof(struct ipv6hdr))
		return NF_DROP;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return NF_DROP;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		return NF_DROP;

	pkt_len = ntohs(hdr->payload_len);

	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
			return NF_DROP;
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			return NF_DROP;
	}
	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	skb->protocol = htons(ETH_P_IPV6);
	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish_ipv6);

	return NF_STOLEN;
}
/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular. Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
	    IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
	    !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_parse_ip_options(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;
	store_orig_dstaddr(skb);
	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}
/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
 * dst_entry, so detach the fake one. On the way up, the
 * packet would pass through PRE_ROUTING again (which already
 * took place when the packet entered the bridge), but we
 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
 * prevent this from happening. */
static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	struct rtable *rt = skb_rtable(skb);

	if (rt && rt == bridge_parent_rtable(in))
		skb_dst_drop(skb);

	return NF_ACCEPT;
}
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *in;

	if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
		in = nf_bridge->physindev;
		if (nf_bridge->mask & BRNF_PKT_TYPE) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->mask ^= BRNF_PKT_TYPE;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
		       skb->dev, br_forward_finish, 1);
	return 0;
}
/* This is the 'purely bridged' case. For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
				     const struct net_device *in,
				     const struct net_device *out,
				     int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	parent = bridge_parent(out);
	if (!parent)
		return NF_DROP;

	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	    IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
		 IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	nf_bridge = skb->nf_bridge;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	if (pf == PF_INET && br_parse_ip_options(skb))
		return NF_DROP;

	/* The physdev module checks on this */
	nf_bridge->mask |= BRNF_BRIDGED;
	nf_bridge->physoutdev = skb->dev;
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
		br_nf_forward_finish);

	return NF_STOLEN;
}
static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (skb->protocol != htons(ETH_P_ARP)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = (struct net_device *)in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
		(struct net_device *)out, br_nf_forward_finish);

	return NF_STOLEN;
}
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;

	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb)) {
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	return br_dev_queue_push_xmit(skb);
}
#endif
/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	    IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
		 IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge_pull_encap_header(skb);
	nf_bridge_save_header(skb);
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}
/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	if (skb->nf_bridge &&
	    !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
		return NF_STOP;
	}

	return NF_ACCEPT;
}
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
	{
		.hook = br_nf_pre_routing,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_local_in,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = PF_INET,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = PF_INET6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};
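/*
 * brnf_sysctl_call_tables() behaves like proc_dointvec() but clamps any
 * nonzero value written to 1, so the bridge-nf-* knobs are strictly
 * boolean from user space.
 */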
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(ctl_table * ctl, int write,
			    void __user * buffer, size_t * lenp, loff_t * ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}
static ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.data		= &brnf_call_arptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.data		= &brnf_call_iptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.data		= &brnf_call_ip6tables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.data		= &brnf_filter_vlan_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.data		= &brnf_filter_pppoe_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{ }
};
static struct ctl_path brnf_path[] = {
	{ .procname = "net", },
	{ .procname = "bridge", },
	{ }
};
#endif
int __init br_netfilter_init(void)
{
	int ret;

	ret = dst_entries_init(&fake_dst_ops);
	if (ret < 0)
		return ret;

	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret < 0) {
		dst_entries_destroy(&fake_dst_ops);
		return ret;
	}
#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
		dst_entries_destroy(&fake_dst_ops);
		return -ENOMEM;
	}
#endif
	printk(KERN_NOTICE "Bridge firewalling registered\n");

	return 0;
}
void br_netfilter_fini(void)
{
	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(brnf_sysctl_header);
#endif
	dst_entries_destroy(&fake_dst_ops);
}