/*
 *	Linux ethernet bridge
 *
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *	Bart De Schuymer		<bdschuym@pandora.be>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>

#include <net/route.h>

#include <asm/uaccess.h>
#include "br_private.h"
#include <linux/sysctl.h>
#define skb_origaddr(skb)	 (((struct bridge_skb_cb *) \
				 (skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb)	 (skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb)	 (skb_origaddr(skb) != ip_hdr(skb)->daddr)
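/* The original destination address is stashed in the nf_bridge control
 * block at PRE_ROUTING (store_orig_dstaddr); comparing it against the
 * current ip_hdr(skb)->daddr later tells us whether DNAT took place. */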
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#endif
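/* Without CONFIG_SYSCTL the call-*tables switches are hard-wired on and the
 * VLAN/PPPoE filtering switches are off; with sysctl support they become
 * runtime tunables (see brnf_table at the bottom of this file). */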
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (vlan_tx_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}
#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) && \
	 brnf_filter_vlan_tagged)
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}
#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)
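/* The IS_VLAN_xxx and IS_PPPOE_xxx helpers look through a VLAN or PPPoE
 * session encapsulation to find the carried protocol; they only match when
 * the corresponding filter-*-tagged switch is enabled. */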
static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops fake_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.update_pmtu =		fake_update_pmtu,
};
/*
 * Initialize bogus route table used to keep netfilter happy.
 * Currently, we fill in the PMTU entry because netfilter
 * refragmentation needs it, and the rt_flags entry because
 * ipt_REJECT needs it.  Future netfilter modules might
 * require us to fill additional fields.
 */
void br_netfilter_rtable_init(struct net_bridge *br)
{
	struct rtable *rt = &br->fake_rtable;

	atomic_set(&rt->dst.__refcnt, 1);
	rt->dst.dev = br->dev;
	rt->dst.path = &rt->dst;
	dst_metric_set(&rt->dst, RTAX_MTU, 1500);
	rt->dst.flags = DST_NOXFRM;
	rt->dst.ops = &fake_dst_ops;
}
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? &port->br->fake_rtable : NULL;
}
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
	if (likely(skb->nf_bridge))
		atomic_set(&(skb->nf_bridge->use), 1);

	return skb->nf_bridge;
}
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (atomic_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			atomic_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}

	return nf_bridge;
}
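/* The helpers below strip or restore the encapsulation header (VLAN or PPPoE
 * session header, as reported by nf_bridge_encap_header_len()): the pull
 * variants adjust skb->data and the network header offset so netfilter sees
 * a plain IP/ARP packet, and the push variant puts the header back before
 * the skb is handed back to the bridge code. */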
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_push(skb, len);
	skb->network_header -= len;
}
static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}
static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}
static inline void nf_bridge_save_header(struct sk_buff *skb)
{
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	skb_copy_from_linear_data_offset(skb, -header_size,
					 skb->nf_bridge->data, header_size);
}
static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
	if (skb->nf_bridge->mask & BRNF_8021Q)
		skb->protocol = htons(ETH_P_8021Q);
	else if (skb->nf_bridge->mask & BRNF_PPPoE)
		skb->protocol = htons(ETH_P_PPP_SES);
}
/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */
static int br_parse_ip_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	struct iphdr *iph;
	struct net_device *dev = skb->dev;
	u32 len;

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto inhdr_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	if (iph->ihl == 5)
		return 0;

	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
	if (ip_options_compile(dev_net(dev), opt, skb))
		goto inhdr_error;

	/* Check correct handling of SRR option */
	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
			goto drop;

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return 0;

inhdr_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}
/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	unsigned int header_size;

	nf_bridge_update_protocol(skb);
	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
 * bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

	rt = bridge_parent_rtable(nf_bridge->physindev);
	if (!rt) {
		kfree_skb(skb);
		return 0;
	}
	skb_dst_set_noref(skb, &rt->dst);

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}
/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	if (dst->hh) {
		neigh_hh_bridge(dst->hh, skb);
		skb->dev = nf_bridge->physindev;
		return br_handle_frame_finish(skb);
	} else if (dst->neighbour) {
		/* the neighbour function below overwrites the complete
		 * MAC header, so we save the Ethernet source address and
		 * protocol number. */
		skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
						 skb->nf_bridge->data,
						 ETH_HLEN-ETH_ALEN);
		/* tell br_dev_xmit to continue with forwarding */
		nf_bridge->mask |= BRNF_BRIDGED_DNAT;
		return dst->neighbour->output(skb);
	}
free_skb:
	kfree_skb(skb);
	return 0;
}
/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;
	int err;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(dev_net(dev), iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}
/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
	nf_bridge->physindev = skb->dev;
	skb->dev = bridge_parent(skb->dev);
	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->mask |= BRNF_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->mask |= BRNF_PPPoE;

	return skb->dev;
}
/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
	u32 pkt_len;
	const unsigned char *nh = skb_network_header(skb);
	int off = raw - nh;
	int len = (raw[1] + 1) << 3;

	if ((raw + len) - skb->data > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;

		switch (nh[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		case IPV6_TLV_JUMBO:
			if (nh[off + 1] != 4 || (off & 3) != 2)
				goto bad;
			pkt_len = ntohl(*(__be32 *) (nh + off + 2));
			if (pkt_len <= IPV6_MAXPLEN ||
			    ipv6_hdr(skb)->payload_len)
				goto bad;
			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
				goto bad;
			if (pskb_trim_rcsum(skb,
					    pkt_len + sizeof(struct ipv6hdr)))
				goto bad;
			nh = skb_network_header(skb);
			break;
		default:
			if (optlen > len)
				goto bad;
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 0;
bad:
	return -1;
}
/* Replicate the checks that IPv6 does on packet reception and pass the packet
 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
					   struct sk_buff *skb,
					   const struct net_device *in,
					   const struct net_device *out,
					   int (*okfn)(struct sk_buff *))
{
	const struct ipv6hdr *hdr;
	u32 pkt_len;

	if (skb->len < sizeof(struct ipv6hdr))
		return NF_DROP;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return NF_DROP;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		return NF_DROP;

	pkt_len = ntohs(hdr->payload_len);

	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
			return NF_DROP;
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			return NF_DROP;
	}
	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	skb->protocol = htons(ETH_P_IPV6);
	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish_ipv6);

	return NF_STOLEN;
}
/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular.  Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
	    IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
	    !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_parse_ip_options(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;
	store_orig_dstaddr(skb);
	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}
/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
 * dst_entry, so detach the fake one.  On the way up, the
 * packet would pass through PRE_ROUTING again (which already
 * took place when the packet entered the bridge), but we
 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
 * prevent this from happening. */
static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	struct rtable *rt = skb_rtable(skb);

	if (rt && rt == bridge_parent_rtable(in))
		skb_dst_drop(skb);

	return NF_ACCEPT;
}
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *in;

	if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
		in = nf_bridge->physindev;
		if (nf_bridge->mask & BRNF_PKT_TYPE) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->mask ^= BRNF_PKT_TYPE;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
		       skb->dev, br_forward_finish, 1);
	return 0;
}
/* This is the 'purely bridged' case.  For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
				     const struct net_device *in,
				     const struct net_device *out,
				     int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	parent = bridge_parent(out);
	if (!parent)
		return NF_DROP;

	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	    IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
		 IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	nf_bridge = skb->nf_bridge;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	if (br_parse_ip_options(skb))
		return NF_DROP;

	/* The physdev module checks on this */
	nf_bridge->mask |= BRNF_BRIDGED;
	nf_bridge->physoutdev = skb->dev;
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
		br_nf_forward_finish);

	return NF_STOLEN;
}
static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (skb->protocol != htons(ETH_P_ARP)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = (struct net_device *)in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
		(struct net_device *)out, br_nf_forward_finish);

	return NF_STOLEN;
}
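/* For ARP the hook above runs with the real bridge ports as indev/outdev;
 * the input port is also stashed in skb->cb so that br_nf_forward_finish()
 * can hand the frame back to the bridge with the right device. */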
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;

	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb)) {
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	return br_dev_queue_push_xmit(skb);
}
#endif
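/* With IPv4 conntrack built in, bridged IPv4 packets that grew beyond the
 * outgoing device MTU (typically because conntrack defragmented them on the
 * way in) are refragmented here before transmission; otherwise packets are
 * queued for transmit unchanged. */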
/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	    IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
		 IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge_pull_encap_header(skb);
	nf_bridge_save_header(skb);
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}
/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	if (skb->nf_bridge &&
	    !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
		return NF_STOP;
	}

	return NF_ACCEPT;
}
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
	{
		.hook = br_nf_pre_routing,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_local_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(ctl_table * ctl, int write,
			    void __user * buffer, size_t * lenp, loff_t * ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}
static ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.data		= &brnf_call_arptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.data		= &brnf_call_iptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.data		= &brnf_call_ip6tables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.data		= &brnf_filter_vlan_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.data		= &brnf_filter_pppoe_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{ }
};
static struct ctl_path brnf_path[] = {
	{ .procname = "net", },
	{ .procname = "bridge", },
	{ }
};
#endif
int __init br_netfilter_init(void)
{
	int ret;

	ret = dst_entries_init(&fake_dst_ops);
	if (ret < 0)
		return ret;

	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret < 0) {
		dst_entries_destroy(&fake_dst_ops);
		return ret;
	}
#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
		dst_entries_destroy(&fake_dst_ops);
		return -ENOMEM;
	}
#endif
	printk(KERN_NOTICE "Bridge firewalling registered\n");

	return 0;
}
void br_netfilter_fini(void)
{
	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(brnf_sysctl_header);
#endif
	dst_entries_destroy(&fake_dst_ops);
}