/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/protocol.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it would require maintaining a new variable in
   EVERY skb, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_start_xmit(), no locking is needed. We force an exit if this
   counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would
   really kill the network. The IP hop limit plays the role of
   "t->recursion" in this case, if we copy it from the packet being
   encapsulated to the upper header. It is a very good solution, but
   it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output would
     even be more informative. This idea appeared to be wrong: only
     Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least those in my
     neighbourhood) return only 8 bytes of payload. That is the end.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. This is difficult or even impossible,
   especially taking fragmentation into account. To be short: ttl is
   not a solution at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   and that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when encapsulated packets have DF set.
   But it is not our problem! Nobody could accuse us; we made
   all that we could. Even if it was your gated that injected the
   fatal route to the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them together,
   but it is not very evident how to make them modular.
   sit is an integral part of IPv6, while ipip and gre are naturally
   modular. We could extract the common parts (hash table, ioctl etc)
   to a separate module (ip_tunnel.c).

   Alexey Kuznetsov.
 */
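/* A minimal sketch of the xmit_recursion scheme described above.  The real
 * counter lives in the core transmit path (net/core/dev.c), not in this
 * file; the names and the limit value below are illustrative only.
 */
#if 0
static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

static int xmit_one_sketch(struct sk_buff *skb, struct net_device *dev)
{
	int ret;

	/* cpu migration is impossible inside ndo_start_xmit(), so a
	 * plain per-cpu counter needs no locking. */
	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) {
		kfree_skb(skb);		/* break the local dead loop */
		return -ELOOP;
	}
	__this_cpu_inc(xmit_recursion);
	ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
	__this_cpu_dec(xmit_recursion);
	return ret;
}
#endif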
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);
/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE	16

static int ipgre_net_id __read_mostly;
struct ipgre_net {
	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};
/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if they do not match a configured keyless tunnel,
   will match the fallback tunnel.
 */

#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
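/* Illustrative only: how a fully specified (remote, local, key) tunnel is
 * bucketed with the macros above.  ipgre_tunnel_lookup() below walks the
 * four tables from most to least specific using exactly these indices.
 */
#if 0
static struct ip_tunnel __rcu **example_bucket(struct ipgre_net *ign,
					       __be32 remote, __be32 key)
{
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(key);

	/* table 3: both remote and local addresses are known */
	return &ign->tunnels_r_l[h0 ^ h1];
}
#endif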
static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		/* Retry loop: re-read if the writer updated the 64-bit
		 * counters while we were fetching them. */
		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->multicast = dev->stats.multicast;
	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
	tot->rx_errors = dev->stats.rx_errors;

	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	return tot;
}
/* Does key in tunnel parameters match packet */
static bool ipgre_key_match(const struct ip_tunnel_parm *p,
			    __be16 flags, __be32 key)
{
	if (p->i_flags & GRE_KEY) {
		if (flags & GRE_KEY)
			return key == p->i_key;
		else
			return false;	/* key expected, none present */
	} else
		return !(flags & GRE_KEY);
}
/* Given src, dst and key, find an appropriate tunnel for input. */

static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
					     __be32 remote, __be32 local,
					     __be16 flags, __be32 key,
					     __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(key);
	struct ip_tunnel *t, *cand = NULL;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IPGRE;
	int score, cand_score = 4;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
					       struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	unsigned int h = HASH(key);
	int prio = 0;

	if (local)
		prio |= 1;
	if (remote && !ipv4_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &ign->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
						    struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipgre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t;
	struct ip_tunnel __rcu **tp;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (tp = __ipgre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}
static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
					     struct ip_tunnel_parm *parms, int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "gre%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ipgre_link_ops;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
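/* For reference, a sketch of the GRE header that ipgre_err() and
 * ipgre_rcv() parse below (RFC 1701/2784/2890).  This struct is purely
 * illustrative and is not used by the code in this file; the optional
 * words appear in this order when the corresponding flag is set.
 */
#if 0
struct gre_hdr_sketch {
	__be16 flags;		/* GRE_CSUM|GRE_ROUTING|GRE_KEY|GRE_SEQ|GRE_VERSION */
	__be16 protocol;	/* inner EtherType, e.g. htons(ETH_P_TEB) */
	/* if (flags & GRE_CSUM):	__be16 csum, reserved;	*/
	/* if (flags & GRE_KEY):	__be32 key;		*/
	/* if (flags & GRE_SEQ):	__be32 seq;		*/
};
#endif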
static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put the GRE key in the third word
   of the GRE header. That makes it impossible to maintain even soft
   state for keyed GRE tunnels with checksums enabled. Tell them
   "thank you".

   Well, I wonder: rfc1812 was written by a Cisco employee; why the
   hell do these idiots break the standards they established themselves?!
 */
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;
	__be32 key = 0;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	if (flags & GRE_KEY)
		key = *(((__be32 *)p) + (grehlen / 4) - 1);

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags, key, p[1]);

	if (t == NULL)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
			      IPPROTO_GRE, 0);
		return;
	}
	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
static inline u8
ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;

	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}
static int ipgre_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;
	__be16 gre_proto;
	int    err;

	if (!pskb_may_pull(skb, 16))
		goto drop;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	tunnel = ipgre_tunnel_lookup(skb->dev,
				     iph->saddr, iph->daddr, flags, key,
				     gre_proto);
	if (tunnel) {
		struct pcpu_tstats *tstats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (rt_is_output_route(skb_rtable(skb)))
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			/* Wrap-safe signed comparison of sequence numbers */
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		err = IP_ECN_decapsulate(iph, skb);
		if (unlikely(err)) {
			if (log_ecn_error)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &iph->saddr, iph->tos);
			if (err > 1) {
				++tunnel->dev->stats.rx_frame_errors;
				++tunnel->dev->stats.rx_errors;
				goto drop;
			}
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		gro_cells_receive(&tunnel->gro_cells, skb);
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
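/* Illustrative helper: how many bytes of GRE header precede the payload
 * for a given flags word.  This mirrors the offset walk in ipgre_rcv()
 * above; it is a sketch for reference, not used by this file.
 */
#if 0
static int gre_hdr_len_sketch(__be16 flags)
{
	int len = 4;			/* flags + protocol */

	if (flags & GRE_CSUM)
		len += 4;		/* csum + reserved */
	if (flags & GRE_KEY)
		len += 4;
	if (flags & GRE_SEQ)
		len += 4;
	return len;
}
#endif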
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *old_iph = ip_hdr(skb);
	const struct iphdr  *tiph;
	struct flowi4 fl4;
	u8     tos;
	__be16 df;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    gre_hlen;
	__be32 dst;
	int    mtu;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    skb_checksum_help(skb))
		goto tx_error;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
		gre_hlen = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tiph = (const struct iphdr *)skb->data;
		else
			tiph = &tunnel->parms.iph;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, old_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;
	}

	tos = tiph->tos;
	if (tos == 1) {
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	}

	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
				 tunnel->parms.o_key, RT_TOS(tos),
				 tunnel->parms.link);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if ((old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (max_headroom > dev->needed_headroom)
			dev->needed_headroom = max_headroom;
		if (!new_skb) {
			ip_rt_put(rt);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
		/* Warning : tiph value might point to freed memory */
	}

	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, sizeof(*iph));
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IPIP header.
	 */

	iph			= ip_hdr(skb);
	iph->version		= 4;
	iph->ihl		= sizeof(struct iphdr) >> 2;
	iph->frag_off		= df;
	iph->protocol		= IPPROTO_GRE;
	iph->tos		= ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr		= fl4.daddr;
	iph->saddr		= fl4.saddr;
	iph->ttl		= tiph->ttl;

	if (iph->ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = ip4_dst_hoplimit(&rt->dst);
	}

	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
		}
	}

	iptunnel_xmit(skb, dev);
	return NETDEV_TX_OK;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 iph->daddr, iph->saddr,
					 tunnel->parms.o_key,
					 RT_TOS(iph->tos),
					 tunnel->parms.link);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;

	return mtu;
}
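/* Worked example (illustrative, assuming dev->hard_header_len is 0 for a
 * plain point-to-point tunnel): with o_flags = GRE_CSUM|GRE_KEY|GRE_SEQ,
 * addend = sizeof(struct iphdr) + 4 + 4 + 4 + 4 = 36, so an Ethernet
 * underlay (ETH_DATA_LEN = 1500) yields a tunnel mtu of 1464.
 */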
static int
ipgre_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				ipgre_tunnel_unlink(ign, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	/* 0xFFF8 is the largest 8-byte-aligned value below the 64K
	 * maximum IP packet size. */
	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp fec0:6666:6666::193.233.7.65
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0]		= t->parms.o_flags;
	p[1]		= htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen;

	return -(t->hlen);
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}

#endif
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};
static void ipgre_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(dev->tstats);
	free_netdev(dev);
}
#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->type		= ARPHRD_IPGRE;
	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
}
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int err;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}
static void ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	dev_hold(dev);
}
static const struct gre_protocol ipgre_protocol = {
	.handler	= ipgre_rcv,
	.err_handler	= ipgre_err,
};
static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
{
	int prio;

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}
static int __net_init ipgre_init_net(struct net *net)
{
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					  ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign;
	LIST_HEAD(list);

	ign = net_generic(net, ipgre_net_id);
	rtnl_lock();
	ipgre_destroy_tunnels(ign, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}
static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
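/* For reference, these netlink attributes correspond to the iproute2
 * parameters, e.g. (an illustrative invocation, not taken from this file):
 *
 *	ip link add gre1 type gre local 10.0.0.1 remote 10.0.0.2 \
 *		ttl 64 ikey 1 okey 1
 */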
static int ipgre_tap_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	ipgre_tunnel_bind_dev(dev);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops		= &ipgre_tap_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}
*dev
, struct nlattr
*tb
[],
1630 struct nlattr
*data
[])
1632 struct ip_tunnel
*t
, *nt
;
1633 struct net
*net
= dev_net(dev
);
1634 struct ipgre_net
*ign
= net_generic(net
, ipgre_net_id
);
1635 struct ip_tunnel_parm p
;
1638 if (dev
== ign
->fb_tunnel_dev
)
1641 nt
= netdev_priv(dev
);
1642 ipgre_netlink_parms(data
, &p
);
1644 t
= ipgre_tunnel_locate(net
, &p
, 0);
1652 if (dev
->type
!= ARPHRD_ETHER
) {
1653 unsigned int nflags
= 0;
1655 if (ipv4_is_multicast(p
.iph
.daddr
))
1656 nflags
= IFF_BROADCAST
;
1657 else if (p
.iph
.daddr
)
1658 nflags
= IFF_POINTOPOINT
;
1660 if ((dev
->flags
^ nflags
) &
1661 (IFF_POINTOPOINT
| IFF_BROADCAST
))
1665 ipgre_tunnel_unlink(ign
, t
);
1666 t
->parms
.iph
.saddr
= p
.iph
.saddr
;
1667 t
->parms
.iph
.daddr
= p
.iph
.daddr
;
1668 t
->parms
.i_key
= p
.i_key
;
1669 if (dev
->type
!= ARPHRD_ETHER
) {
1670 memcpy(dev
->dev_addr
, &p
.iph
.saddr
, 4);
1671 memcpy(dev
->broadcast
, &p
.iph
.daddr
, 4);
1673 ipgre_tunnel_link(ign
, t
);
1674 netdev_state_change(dev
);
1677 t
->parms
.o_key
= p
.o_key
;
1678 t
->parms
.iph
.ttl
= p
.iph
.ttl
;
1679 t
->parms
.iph
.tos
= p
.iph
.tos
;
1680 t
->parms
.iph
.frag_off
= p
.iph
.frag_off
;
1682 if (t
->parms
.link
!= p
.link
) {
1683 t
->parms
.link
= p
.link
;
1684 mtu
= ipgre_tunnel_bind_dev(dev
);
1687 netdev_state_change(dev
);
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
/*
 *	And now the modules code and kernel interface.
 */

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	return 0;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	unregister_pernet_device(&ipgre_net_ops);
}
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");