/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it would require maintaining a new variable in ALL
   skbs, even when no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. It is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_start_xmit(), a percpu count is safe. We force an exit when this
   counter reaches RECURSION_LIMIT (see the sketch after this comment).

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two
   problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output would be
     even more informative. This idea appeared to be wrong: only Linux
     complies with rfc1812 now (yes, guys, Linux is the only true router
     now :-)); all other routers (at least, in my neighbourhood) return
     only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. This is difficult or even impossible,
   especially taking fragmentation into account. In short, it is not a
   solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and the tunnel mtu
   quickly degrades to a value <68, where looping stops.
   Yes, it is not good if there is a router in the loop
   which does not force DF, even when encapsulated packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it was your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them
   together, but it is not obvious how to make them modular.
   sit is an integral part of IPv6, while ipip and gre are naturally
   modular. We could extract the common parts (hash table, ioctl etc.)
   to a separate module (ip_tunnel.c).
 */
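/* A minimal illustrative sketch (deliberately not compiled, hence #if 0)
 * of the percpu recursion guard described in (1) above.  The names
 * xmit_recursion_guard and my_tunnel_xmit are hypothetical; the real
 * counter lives in the core transmit path, not in this file.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, xmit_recursion_guard);

static netdev_tx_t my_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Percpu is safe here: migration is forbidden inside ndo_start_xmit(). */
	if (__this_cpu_read(xmit_recursion_guard) >= RECURSION_LIMIT) {
		/* A local dead loop: drop instead of overflowing the stack. */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	__this_cpu_inc(xmit_recursion_guard);
	/* ... build the outer headers and re-enter the stack here ... */
	__this_cpu_dec(xmit_recursion_guard);
	return NETDEV_TX_OK;
}
#endif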
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE  16

static int ipgre_net_id __read_mostly;
struct ipgre_net {
	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};
/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matching a configured keyless tunnel,
   will match the fallback tunnel.

   (A worked example of the bucket selection follows the macros below.)
 */

#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
/*
 * Locking : hash tables are protected by RCU and RTNL
 */

#define for_each_ip_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
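/* A worked example (not compiled) of the bucket selection above, matching
 * the priorities used by __ipgre_bucket() and ipgre_tunnel_lookup() below:
 * a tunnel with both endpoints set hangs off tunnels_r_l at index
 * HASH(remote) ^ HASH(key); remote-only tunnels use tunnels_r at the same
 * combined index; local-only (tunnels_l) and wildcard (tunnels_wc) chains
 * are indexed by HASH(key) alone.
 */
#if 0
	unsigned int h0 = HASH(remote);		/* folds a __be32 into 0..15 */
	unsigned int h1 = HASH(key);
	struct ip_tunnel __rcu **chain = &ign->tunnels_r_l[h0 ^ h1];
#endif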
/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_tstats {
	unsigned long	rx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_bytes;
};
static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
{
	struct pcpu_tstats sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);

		sum.rx_packets += tstats->rx_packets;
		sum.rx_bytes   += tstats->rx_bytes;
		sum.tx_packets += tstats->tx_packets;
		sum.tx_bytes   += tstats->tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes   = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes   = sum.tx_bytes;
	return &dev->stats;
}
/* Given src, dst and key, find the appropriate tunnel for input. */

static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
					     __be32 remote, __be32 local,
					     __be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(key);
	struct ip_tunnel *t, *cand = NULL;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IPGRE;
	int score, cand_score = 4;

	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
		if (remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
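/* A note on the scoring above: bit 0 of score is set on an underlying
 * link mismatch and bit 1 on a device type mismatch, so 0 is an exact
 * match and candidates degrade through 1 (wrong link), 2 (wrong type)
 * and 3 (both).  The first entry scoring 0 wins immediately; otherwise
 * the lowest-scoring candidate across all four chains is returned,
 * falling back to the gre0 device only if nothing matched at all.
 */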
static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
					       struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	unsigned int h = HASH(key);
	int prio = 0;

	if (local)
		prio |= 1;
	if (remote && !ipv4_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &ign->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
						    struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipgre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t;
	struct ip_tunnel __rcu **tp;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (tp = __ipgre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}
static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
					     struct ip_tunnel_parm *parms, int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		sprintf(name, "gre%%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (strchr(name, '%')) {
		if (dev_alloc_name(dev, name) < 0)
			goto failed_free;
	}

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ipgre_link_ops;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. This means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put the GRE key in the third word
   of the GRE header. This makes it impossible to maintain even soft
   state for keyed GRE tunnels with checksum enabled. Tell them
   "thank you".

   Well, I wonder: rfc1812 was written by a Cisco employee, so what
   the hell makes these idiots break the standards established by
   their own ancestors?
 */

	struct iphdr *iph = (struct iphdr *)skb->data;
	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	rcu_read_lock();
	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags & GRE_KEY ?
				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
				p[1]);
	if (t == NULL || t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		goto out;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	rcu_read_unlock();
}
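/* For reference, the on-the-wire GRE header parsed above and in
 * ipgre_rcv() below (RFC 1701/2784).  Optional words appear only when
 * the corresponding flag bit is set, always in this order:
 *
 *	+---------------+---------------+
 *	| flags/version |   protocol	|  always present (4 bytes)
 *	+---------------+---------------+
 *	|   checksum	|   reserved	|  if GRE_CSUM (4 bytes)
 *	+-------------------------------+
 *	|		key		|  if GRE_KEY (4 bytes)
 *	+-------------------------------+
 *	|	 sequence number	|  if GRE_SEQ (4 bytes)
 *	+-------------------------------+
 *
 * Hence grehlen above grows by 4 for GRE_KEY and by another 4 when
 * GRE_CSUM is also set, and the 8 payload bytes most routers return
 * are never enough to reach the key of a keyed tunnel.
 */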
static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(iph->tos)) {
		if (skb->protocol == htons(ETH_P_IP)) {
			IP_ECN_set_ce(ip_hdr(skb));
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
	}
}
static inline u8
ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;
	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}
static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;
	__be16 gre_proto;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	rcu_read_lock();
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct pcpu_tstats *tstats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (skb_rtable(skb)->fl.iif == 0)
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);

		if (netif_rx(skb) == NET_RX_DROP)
			tunnel->dev->stats.rx_dropped++;

		rcu_read_unlock();
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	rcu_read_unlock();
drop_nolock:
	kfree_skb(skb);
	return 0;
}
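/* The in-order test above, (s32)(seqno - tunnel->i_seqno) < 0, is
 * wraparound-safe serial-number arithmetic: the unsigned difference is
 * reinterpreted as signed.  E.g. seqno = 2 with i_seqno = 0xfffffffe
 * yields (s32)4 >= 0 and is accepted across the wrap, while seqno = 5
 * with i_seqno = 7 yields (s32)-2 < 0 and is dropped as old.
 */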
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct pcpu_tstats *tstats;
	struct iphdr  *old_iph = ip_hdr(skb);
	struct iphdr  *tiph;
	u8     tos;
	__be16 df;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    gre_hlen;
	__be32 dst;
	int    mtu;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
		gre_hlen = 0;
		tiph = (struct iphdr *)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			if ((dst = rt->rt_gateway) == 0)
				goto tx_error_icmp;
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			int addr_type;
			struct neighbour *neigh = skb_dst(skb)->neighbour;

			if (neigh == NULL)
				goto tx_error;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				goto tx_error_icmp;

			dst = addr6->s6_addr32[3];
		}
#endif
		else
			goto tx_error;
	}

	tos = tiph->tos;
	if (tos == 1) {
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
	}

	{
		struct flowi fl = {
			.oif = tunnel->parms.link,
			.nl_u = {
				.ip4_u = {
					.daddr = dst,
					.saddr = tiph->saddr,
					.tos = RT_TOS(tos)
				}
			},
			.proto = IPPROTO_GRE
		};
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if ((old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
			}
		}

		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (max_headroom > dev->needed_headroom)
			dev->needed_headroom = max_headroom;
		if (!new_skb) {
			ip_rt_put(rt);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb_reset_transport_header(skb);
	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IPIP header.
	 */

	iph			=	ip_hdr(skb);
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr) >> 2;
	iph->frag_off		=	df;
	iph->protocol		=	IPPROTO_GRE;
	iph->tos		=	ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr		=	rt->rt_dst;
	iph->saddr		=	rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
	}

	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
		}
	}

	nf_reset(skb);
	tstats = this_cpu_ptr(dev->tstats);
	__IPTUNNEL_XMIT(tstats, &dev->stats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);

tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi fl = {
			.oif = tunnel->parms.link,
			.nl_u = {
				.ip4_u = {
					.daddr = iph->daddr,
					.saddr = iph->saddr,
					.tos = RT_TOS(iph->tos)
				}
			},
			.proto = IPPROTO_GRE
		};
		struct rtable *rt;

		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;

	return mtu;
}
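/* Worked example for the option accounting above: the base GRE header
 * is 4 bytes (flags + protocol), so with GRE_CSUM, GRE_KEY and GRE_SEQ
 * all set, addend = 20 (outer IP) + 4 + 4 + 4 + 4 = 36, tunnel->hlen
 * becomes 36 and both needed_headroom and the usable mtu move by the
 * same amount.
 */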
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				ipgre_tunnel_unlink(ign, t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
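/* Bounds rationale: 68 is the minimum MTU an IPv4 link must support,
 * and 0xFFF8 (65528) is the largest 8-byte-aligned size representable
 * in the 16-bit IP total-length field.  For a plain gre device
 * (hard_header_len 0, hlen 24) that allows any mtu in [68, 65504].
 */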
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could do something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0]		= t->parms.o_flags;
	p[1]		= htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen;

	return -t->hlen;
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
	memcpy(haddr, &iph->saddr, 4);
	return 4;
}
static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi fl = {
			.oif = t->parms.link,
			.nl_u = {
				.ip4_u = {
					.daddr = t->parms.iph.daddr,
					.saddr = t->parms.iph.saddr,
					.tos = RT_TOS(t->parms.iph.tos)
				}
			},
			.proto = IPPROTO_GRE
		};
		struct rtable *rt;

		if (ip_route_output_key(dev_net(dev), &rt, &fl))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}

#endif
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats		= ipgre_get_stats,
};
static void ipgre_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->type		= ARPHRD_IPGRE;
	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static void ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	dev_hold(dev);
	rcu_assign_pointer(ign->tunnels_wc[0], tunnel);
}
static const struct gre_protocol ipgre_protocol = {
	.handler	= ipgre_rcv,
	.err_handler	= ipgre_err,
};
static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
{
	int prio;

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}
static int __net_init ipgre_init_net(struct net *net)
{
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					  ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	return 0;

err_reg_dev:
	free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign;
	LIST_HEAD(list);

	ign = net_generic(net, ipgre_net_id);
	rtnl_lock();
	ipgre_destroy_tunnels(ign, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}
static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
static int ipgre_tap_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	ipgre_tunnel_bind_dev(dev);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats		= ipgre_get_stats,
};
static void ipgre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops		= &ipgre_tap_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		random_ether_addr(dev->dev_addr);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
	NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
	NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
	NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
	NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
	NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
	NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
	NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
	NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
	NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
/*
 *	And now the modules code and kernel interface.
 */

static int __init ipgre_init(void)
{
	int err;

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_net_ops);
	goto out;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");