/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

static unsigned int ip_tunnel_hash(struct ip_tunnel_net *itn,
                                   __be32 key, __be32 remote)
{
        return hash_32((__force u32)key ^ (__force u32)remote,
                       IP_TNL_HASH_BITS);
}

/* Often modified stats are per cpu, others are shared (netdev->stats) */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
                                                struct rtnl_link_stats64 *tot)
{
        int i;

        for_each_possible_cpu(i) {
                const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_bh(&tstats->syncp);
                        rx_packets = tstats->rx_packets;
                        tx_packets = tstats->tx_packets;
                        rx_bytes = tstats->rx_bytes;
                        tx_bytes = tstats->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes   += rx_bytes;
                tot->tx_bytes   += tx_bytes;
        }

        tot->multicast = dev->stats.multicast;

        tot->rx_crc_errors = dev->stats.rx_crc_errors;
        tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
        tot->rx_errors = dev->stats.rx_errors;

        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
        tot->tx_errors = dev->stats.tx_errors;

        tot->collisions = dev->stats.collisions;

        return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);

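/* Decide whether a received packet's TUNNEL_KEY flag and key value are
 * acceptable for a tunnel configured with parameters @p.
 */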
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
                                __be16 flags, __be32 key)
{
        if (p->i_flags & TUNNEL_KEY) {
                if (flags & TUNNEL_KEY)
                        return key == p->i_key;
                else
                        /* key expected, none present */
                        return false;
        } else
                return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for an incoming packet.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   int link, __be16 flags,
                                   __be32 remote, __be32 local,
                                   __be32 key)
{
        unsigned int hash;
        struct ip_tunnel *t, *cand = NULL;
        struct hlist_head *head;

        hash = ip_tunnel_hash(itn, key, remote);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local != t->parms.iph.saddr ||
                    remote != t->parms.iph.daddr ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else
                        cand = t;
        }

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (remote != t->parms.iph.daddr ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        hash = ip_tunnel_hash(itn, key, 0);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if ((local != t->parms.iph.saddr &&
                     (local != t->parms.iph.daddr ||
                      !ipv4_is_multicast(local))) ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        if (flags & TUNNEL_NO_KEY)
                goto skip_key_lookup;

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (t->parms.i_key != key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

skip_key_lookup:
        if (cand)
                return cand;

        if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
                return netdev_priv(itn->fb_tunnel_dev);

        return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);

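/* Select the hash bucket for a tunnel from its configured parameters:
 * hash over the (unicast) remote address and the input key.
 */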
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
                                    struct ip_tunnel_parm *parms)
{
        unsigned int h;
        __be32 remote;

        if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
                remote = parms->iph.daddr;
        else
                remote = 0;

        h = ip_tunnel_hash(itn, parms->i_key, remote);
        return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
        struct hlist_head *head = ip_bucket(itn, &t->parms);

        hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel *t)
{
        hlist_del_init_rcu(&t->hash_node);
}

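/* Exact-match lookup used on the configuration path (ioctl/netlink):
 * saddr, daddr, key, link and device type must all match.
 */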
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
                                        struct ip_tunnel_parm *parms,
                                        int type)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
        int link = parms->link;
        struct ip_tunnel *t = NULL;
        struct hlist_head *head = ip_bucket(itn, parms);

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr &&
                    key == t->parms.i_key &&
                    link == t->parms.link &&
                    type == t->dev->type)
                        break;
        }

        return t;
}

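/* Allocate and register a new tunnel net_device for @parms, deriving the
 * interface name from parms->name or from the rtnl_link_ops kind.
 */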
static struct net_device *__ip_tunnel_create(struct net *net,
                                             const struct rtnl_link_ops *ops,
                                             struct ip_tunnel_parm *parms)
{
        int err;
        struct ip_tunnel *tunnel;
        struct net_device *dev;
        char name[IFNAMSIZ];

        if (parms->name[0])
                strlcpy(name, parms->name, IFNAMSIZ);
        else {
                if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
                        err = -E2BIG;
                        goto failed;
                }
                strlcpy(name, ops->kind, IFNAMSIZ);
                strncat(name, "%d", 2);
        }

        dev = alloc_netdev(ops->priv_size, name, ops->setup);
        if (!dev) {
                err = -ENOMEM;
                goto failed;
        }
        dev_net_set(dev, net);

        dev->rtnl_link_ops = ops;

        tunnel = netdev_priv(dev);
        tunnel->parms = *parms;

        err = register_netdevice(dev);
        if (err)
                goto failed_free;

        return dev;

failed_free:
        free_netdev(dev);
failed:
        return ERR_PTR(err);
}

static inline struct rtable *ip_route_output_tunnel(struct net *net,
                                                    struct flowi4 *fl4,
                                                    int proto,
                                                    __be32 daddr, __be32 saddr,
                                                    __be32 key, __u8 tos, int oif)
{
        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_oif = oif;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->flowi4_tos = tos;
        fl4->flowi4_proto = proto;
        fl4->fl4_gre_key = key;
        return ip_route_output_key(net, fl4);
}

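/* Bind the tunnel to an underlying device (when one can be determined from
 * the route to the remote endpoint or from parms.link) and return a suitable
 * MTU; also sets dev->needed_headroom and dev->iflink.
 */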
static int ip_tunnel_bind_dev(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *iph;
        int hlen = LL_MAX_HEADER;
        int mtu = ETH_DATA_LEN;
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);

        iph = &tunnel->parms.iph;

        /* Guess output device to choose reasonable mtu and needed_headroom */
        if (iph->daddr) {
                struct flowi4 fl4;
                struct rtable *rt;

                rt = ip_route_output_tunnel(tunnel->net, &fl4,
                                            tunnel->parms.iph.protocol,
                                            iph->daddr, iph->saddr,
                                            tunnel->parms.o_key,
                                            RT_TOS(iph->tos),
                                            tunnel->parms.link);
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;
        }

        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

        if (tdev) {
                hlen = tdev->hard_header_len + tdev->needed_headroom;
                mtu = tdev->mtu;
        }
        dev->iflink = tunnel->parms.link;

        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);

        if (mtu < 68)
                mtu = 68;

        return mtu;
}

static struct ip_tunnel *ip_tunnel_create(struct net *net,
                                          struct ip_tunnel_net *itn,
                                          struct ip_tunnel_parm *parms)
{
        struct ip_tunnel *nt, *fbt;
        struct net_device *dev;

        BUG_ON(!itn->fb_tunnel_dev);
        fbt = netdev_priv(itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
                return NULL;

        dev->mtu = ip_tunnel_bind_dev(dev);

        nt = netdev_priv(dev);
        ip_tunnel_add(itn, nt);
        return nt;
}

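/* Common receive path for decapsulated packets: validate checksum and
 * sequence flags, undo ECN, update per-cpu stats and hand the skb to the
 * tunnel's GRO cells.
 */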
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
        struct pcpu_tstats *tstats;
        const struct iphdr *iph = ip_hdr(skb);
        int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(iph->daddr)) {
                /* Looped back packet, drop it! */
                if (rt_is_output_route(skb_rtable(skb)))
                        goto drop;
                tunnel->dev->stats.multicast++;
                skb->pkt_type = PACKET_BROADCAST;
        }
#endif

        if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
             ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
                tunnel->dev->stats.rx_crc_errors++;
                tunnel->dev->stats.rx_errors++;
                goto drop;
        }

        if (tunnel->parms.i_flags&TUNNEL_SEQ) {
                if (!(tpi->flags&TUNNEL_SEQ) ||
                    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
                        tunnel->dev->stats.rx_fifo_errors++;
                        tunnel->dev->stats.rx_errors++;
                        goto drop;
                }
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }

        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                             &iph->saddr, iph->tos);
                if (err > 1) {
                        ++tunnel->dev->stats.rx_frame_errors;
                        ++tunnel->dev->stats.rx_errors;
                        goto drop;
                }
        }

        tstats = this_cpu_ptr(tunnel->dev->tstats);
        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);

        if (tunnel->dev->type == ARPHRD_ETHER) {
                skb->protocol = eth_type_trans(skb, tunnel->dev);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        } else {
                skb->dev = tunnel->dev;
        }

        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

        gro_cells_receive(&tunnel->gro_cells, skb);
        return 0;

drop:
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);

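/* Propagate the path MTU to the inner flow and, if the packet does not fit,
 * send ICMP fragmentation-needed / ICMPv6 packet-too-big back to the sender.
 */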
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
                           struct rtable *rt, __be16 df)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
        int mtu;

        if (df)
                mtu = dst_mtu(&rt->dst) - dev->hard_header_len
                                        - sizeof(struct iphdr) - tunnel->hlen;
        else
                mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
                    (df & htons(IP_DF)) && mtu < pkt_size) {
                        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                        return -E2BIG;
                }
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

                if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
                           mtu >= IPV6_MIN_MTU) {
                        if ((tunnel->parms.iph.daddr &&
                             !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
                            rt6->rt6i_dst.plen == 128) {
                                rt6->rt6i_flags |= RTF_MODIFIED;
                                dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
                        }
                }

                if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
                    mtu < pkt_size) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                        return -E2BIG;
                }
        }
#endif
        return 0;
}

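/* Common transmit path: resolve the outer destination (including the NBMA
 * case), derive tos/ttl/df from the tunnel parameters or the inner header,
 * route the outer packet and hand it to iptunnel_xmit().
 */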
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, const u8 protocol)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *inner_iph;
        struct flowi4 fl4;
        u8     tos, ttl;
        __be16 df;
        struct rtable *rt;              /* Route to the other host */
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int err;

        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);

        dst = tnl_params->daddr;
        if (dst == 0) {
                /* NBMA tunnel */

                if (skb_dst(skb) == NULL) {
                        dev->stats.tx_fifo_errors++;
                        goto tx_error;
                }

                if (skb->protocol == htons(ETH_P_IP)) {
                        rt = skb_rtable(skb);
                        dst = rt_nexthop(rt, inner_iph->daddr);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
                        const struct in6_addr *addr6;
                        struct neighbour *neigh;
                        bool do_tx_error_icmp;
                        int addr_type;

                        neigh = dst_neigh_lookup(skb_dst(skb),
                                                 &ipv6_hdr(skb)->daddr);
                        if (neigh == NULL)
                                goto tx_error;

                        addr6 = (const struct in6_addr *)&neigh->primary_key;
                        addr_type = ipv6_addr_type(addr6);

                        if (addr_type == IPV6_ADDR_ANY) {
                                addr6 = &ipv6_hdr(skb)->daddr;
                                addr_type = ipv6_addr_type(addr6);
                        }

                        if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
                                do_tx_error_icmp = true;
                        else {
                                do_tx_error_icmp = false;
                                dst = addr6->s6_addr32[3];
                        }
                        neigh_release(neigh);
                        if (do_tx_error_icmp)
                                goto tx_error_icmp;
                }
#endif
                else
                        goto tx_error;
        }

        tos = tnl_params->tos;
        if (tos & 0x1) {
                tos &= ~0x1;
                if (skb->protocol == htons(ETH_P_IP))
                        tos = inner_iph->tos;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
        }

        rt = ip_route_output_tunnel(tunnel->net, &fl4,
                                    protocol,
                                    dst, tnl_params->saddr,
                                    tunnel->parms.o_key,
                                    RT_TOS(tos),
                                    tunnel->parms.link);
        if (IS_ERR(rt)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error;
        }
        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
                ip_rt_put(rt);
                goto tx_error;
        }

        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;

                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
        }

        ttl = tnl_params->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
                else
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }

        df = tnl_params->frag_off;
        if (skb->protocol == htons(ETH_P_IP))
                df |= (inner_iph->frag_off&htons(IP_DF));

        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len;
        if (max_headroom > dev->needed_headroom) {
                dev->needed_headroom = max_headroom;
                if (skb_cow_head(skb, dev->needed_headroom)) {
                        dev->stats.tx_dropped++;
                        dev_kfree_skb(skb);
                        return;
                }
        }

        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
                            ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df,
                            !net_eq(tunnel->net, dev_net(dev)));
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

        return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
        dst_link_failure(skb);
#endif
tx_error:
        dev->stats.tx_errors++;
        dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);

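/* Apply new parameters to an existing tunnel and rehash it, rebinding the
 * underlying device if the link changed.
 */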
static void ip_tunnel_update(struct ip_tunnel_net *itn,
                             struct ip_tunnel *t,
                             struct net_device *dev,
                             struct ip_tunnel_parm *p,
                             bool set_mtu)
{
        ip_tunnel_del(t);
        t->parms.iph.saddr = p->iph.saddr;
        t->parms.iph.daddr = p->iph.daddr;
        t->parms.i_key = p->i_key;
        t->parms.o_key = p->o_key;
        if (dev->type != ARPHRD_ETHER) {
                memcpy(dev->dev_addr, &p->iph.saddr, 4);
                memcpy(dev->broadcast, &p->iph.daddr, 4);
        }
        ip_tunnel_add(itn, t);

        t->parms.iph.ttl = p->iph.ttl;
        t->parms.iph.tos = p->iph.tos;
        t->parms.iph.frag_off = p->iph.frag_off;

        if (t->parms.link != p->link) {
                int mtu;

                t->parms.link = p->link;
                mtu = ip_tunnel_bind_dev(dev);
                if (set_mtu)
                        dev->mtu = mtu;
        }
        netdev_state_change(dev);
}

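/* Handle the SIOCGETTUNNEL/SIOCADDTUNNEL/SIOCCHGTUNNEL/SIOCDELTUNNEL ioctls
 * on behalf of the individual tunnel drivers.
 */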
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
        int err = 0;
        struct ip_tunnel *t;
        struct net *net = dev_net(dev);
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

        BUG_ON(!itn->fb_tunnel_dev);
        switch (cmd) {
        case SIOCGETTUNNEL:
                t = NULL;
                if (dev == itn->fb_tunnel_dev)
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                if (t == NULL)
                        t = netdev_priv(dev);
                memcpy(p, &t->parms, sizeof(*p));
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;
                if (p->iph.ttl)
                        p->iph.frag_off |= htons(IP_DF);
                if (!(p->i_flags&TUNNEL_KEY))
                        p->i_key = 0;
                if (!(p->o_flags&TUNNEL_KEY))
                        p->o_key = 0;

                t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

                if (!t && (cmd == SIOCADDTUNNEL))
                        t = ip_tunnel_create(net, itn, p);

                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                unsigned int nflags = 0;

                                if (ipv4_is_multicast(p->iph.daddr))
                                        nflags = IFF_BROADCAST;
                                else if (p->iph.daddr)
                                        nflags = IFF_POINTOPOINT;

                                if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
                                        err = -EINVAL;
                                        break;
                                }

                                t = netdev_priv(dev);
                        }
                }

                if (t) {
                        err = 0;
                        ip_tunnel_update(itn, t, dev, p, true);
                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                if (dev == itn->fb_tunnel_dev) {
                        err = -ENOENT;
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                        if (t == NULL)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(itn->fb_tunnel_dev))
                                goto done;
                        dev = t->dev;
                }
                unregister_netdevice(dev);
                err = 0;
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

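/* Clamp the requested MTU against the encapsulation overhead. */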
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);

        if (new_mtu < 68 ||
            new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        gro_cells_destroy(&tunnel->gro_cells);
        free_percpu(dev->tstats);
        free_netdev(dev);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_net *itn;

        itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

        if (itn->fb_tunnel_dev != dev) {
                ip_tunnel_del(netdev_priv(dev));
                unregister_netdevice_queue(dev, head);
        }
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname)
{
        struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
        struct ip_tunnel_parm parms;
        int i;

        for (i = 0; i < IP_TNL_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&itn->tunnels[i]);

        if (!ops) {
                itn->fb_tunnel_dev = NULL;
                return 0;
        }

        memset(&parms, 0, sizeof(parms));
        if (devname)
                strlcpy(parms.name, devname, IFNAMSIZ);

        rtnl_lock();
        itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
        if (!IS_ERR(itn->fb_tunnel_dev))
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
        rtnl_unlock();

        return PTR_RET(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

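/* Queue every tunnel device belonging to @itn (including ones created in
 * other netns) for unregistration.
 */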
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
                              struct rtnl_link_ops *ops)
{
        struct net *net = dev_net(itn->fb_tunnel_dev);
        struct net_device *dev, *aux;
        int h;

        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == ops)
                        unregister_netdevice_queue(dev, head);

        for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
                struct ip_tunnel *t;
                struct hlist_node *n;
                struct hlist_head *thead = &itn->tunnels[h];

                hlist_for_each_entry_safe(t, n, thead, hash_node)
                        /* If dev is in the same netns, it has already
                         * been added to the list by the previous loop.
                         */
                        if (!net_eq(dev_net(t->dev), net))
                                unregister_netdevice_queue(t->dev, head);
        }
        if (itn->fb_tunnel_dev)
                unregister_netdevice_queue(itn->fb_tunnel_dev, head);
}

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
        LIST_HEAD(list);

        rtnl_lock();
        ip_tunnel_destroy(itn, &list, ops);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);

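/* rtnl_link_ops newlink helper shared by the tunnel drivers. */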
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm *p)
{
        struct ip_tunnel *nt;
        struct net *net = dev_net(dev);
        struct ip_tunnel_net *itn;
        int mtu;
        int err;

        nt = netdev_priv(dev);
        itn = net_generic(net, nt->ip_tnl_net_id);

        if (ip_tunnel_find(itn, p, dev->type))
                return -EEXIST;

        nt->parms = *p;
        err = register_netdevice(dev);
        if (err)
                goto out;

        if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);

        mtu = ip_tunnel_bind_dev(dev);
        if (!tb[IFLA_MTU])
                dev->mtu = mtu;

        ip_tunnel_add(itn, nt);

out:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
                         struct ip_tunnel_parm *p)
{
        struct ip_tunnel *t;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

        if (dev == itn->fb_tunnel_dev)
                return -EINVAL;

        t = ip_tunnel_find(itn, p, dev->type);

        if (t) {
                if (t->dev != dev)
                        return -EEXIST;
        } else {
                t = tunnel;

                if (dev->type != ARPHRD_ETHER) {
                        unsigned int nflags = 0;

                        if (ipv4_is_multicast(p->iph.daddr))
                                nflags = IFF_BROADCAST;
                        else if (p->iph.daddr)
                                nflags = IFF_POINTOPOINT;

                        if ((dev->flags ^ nflags) &
                            (IFF_POINTOPOINT | IFF_BROADCAST))
                                return -EINVAL;
                }
        }

        ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);

int ip_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
        int err;

        dev->destructor = ip_tunnel_dev_free;
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;

        err = gro_cells_init(&tunnel->gro_cells, dev);
        if (err) {
                free_percpu(dev->tstats);
                return err;
        }

        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
        iph->version = 4;
        iph->ihl = 5;

        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn;

        itn = net_generic(net, tunnel->ip_tnl_net_id);
        /* fb_tunnel_dev will be unregistered in the net-exit call. */
        if (itn->fb_tunnel_dev != dev)
                ip_tunnel_del(netdev_priv(dev));
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do the least required initialization; the rest is done in the tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");