/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#ifdef CONFIG_IPV6
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and to silently drop the packet when it expires. It is the best
   solution, but it requires maintaining a new variable in EVERY
   skb, even if no tunneling is used.

   Current solution: the t->recursion lock breaks dead loops. It looks
   like the dev->tbusy flag, but I preferred a new variable, because
   the semantics are different. One day, when hard_start_xmit
   becomes multithreaded, we will have to use skb->encapsulation.
   (A sketch of this recursion guard follows this comment.)

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and the traceroute output would
     be even more informative. This idea turned out to be wrong: only
     Linux complies with rfc1812 now (yes, guys, Linux is the only true
     router now :-)); all other routers (at least, in my neighbourhood)
     return only 8 bytes of payload. That is the end of it.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner encapsulation
   made by our node. It is difficult or even impossible, especially when
   taking fragmentation into account. In short, it is no solution at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   and that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there is a router in the loop
   which does not force DF, even when the encapsulated packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it was your gated that injected
   the fatal route into the network, even if it was you who configured
   the fatal static route: you are innocent. :-)

   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them
   together, but it is not obvious how to make them modular.
   sit is an integral part of IPv6, while ipip and gre are naturally
   modular. We could extract the common parts (hash table, ioctl, etc.)
   into a separate module (ip_tunnel.c).
 */
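/*
 * Illustration only (not part of the original file): a minimal sketch of
 * the loop-breaking idea from point 1 above. A per-tunnel counter is
 * bumped on entry to transmit; if it is already non-zero we are being
 * re-entered by our own encapsulated output, i.e. a local dead loop,
 * and the packet is dropped instead of recursing. The name sketch_xmit
 * and the surrounding error handling are hypothetical.
 */
#if 0
static int sketch_xmit(struct ip_tunnel *t, struct sk_buff *skb)
{
	if (t->recursion++) {
		/* Our own output came back to us: break the loop. */
		t->dev->stats.collisions++;
		t->recursion--;
		kfree_skb(skb);
		return 0;
	}
	/* ... encapsulate and transmit; see ipgre_tunnel_xmit() below ... */
	t->recursion--;
	return 0;
}
#endif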
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

static int ipgre_fb_tunnel_init(struct net_device *dev);

#define HASH_SIZE  16

static int ipgre_net_id;
struct ipgre_net {
	struct ip_tunnel *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};

/* Tunnel hash table */
/*
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
 */
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
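/*
 * Reading the macros above: bucket class 3 holds fully specified
 * (remote,local) tunnels, class 2 remote-only, class 1 local-only, and
 * class 0 wildcard tunnels. ipgre_tunnel_lookup() below scans them in
 * exactly that order, so the most specific match always wins.
 */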
static DEFINE_RWLOCK(ipgre_lock);
/* Given src, dst and key, find the appropriate tunnel for an input packet. */
static struct ip_tunnel * ipgre_tunnel_lookup(struct net *net,
		__be32 remote, __be32 local, __be32 key)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(key);
	struct ip_tunnel *t;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
			if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
				return t;
		}
	}
	for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
		if (remote == t->parms.iph.daddr) {
			if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
				return t;
		}
	}
	for (t = ign->tunnels_l[h1]; t; t = t->next) {
		if (local == t->parms.iph.saddr ||
		    (local == t->parms.iph.daddr &&
		     ipv4_is_multicast(local))) {
			if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
				return t;
		}
	}
	for (t = ign->tunnels_wc[h1]; t; t = t->next) {
		if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
			return t;
	}

	if (ign->fb_tunnel_dev->flags&IFF_UP)
		return netdev_priv(ign->fb_tunnel_dev);
	return NULL;
}
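/*
 * For example (illustrative addresses): a keyed packet arriving from
 * 10.1.1.1 addressed to 10.2.2.2 with key 42 is tried against
 * tunnels_r_l[HASH(10.1.1.1)^HASH(42)] first, then tunnels_r, then
 * tunnels_l, then tunnels_wc; only a tunnel whose i_key is exactly 42
 * can match, and with no match at all the packet falls back to gre0
 * if that device is up.
 */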
static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
		struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	unsigned h = HASH(key);
	int prio = 0;

	if (local)
		prio |= 1;
	if (remote && !ipv4_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &ign->tunnels[prio][h];
}

static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
		struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}
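/*
 * E.g. (illustration): a tunnel configured with local=10.0.0.1 and a
 * unicast remote=10.0.0.2 gets prio 1|2 = 3 and lands in
 * tunnels_r_l[HASH(key)^HASH(remote)]; a multicast remote is deliberately
 * treated as unspecified, so broadcast tunnels sort by local address only.
 */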
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel **tp = ipgre_bucket(ign, t);

	t->next = *tp;
	write_lock_bh(&ipgre_lock);
	*tp = t;
	write_unlock_bh(&ipgre_lock);
}
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel **tp;

	for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
		if (t == *tp) {
			write_lock_bh(&ipgre_lock);
			*tp = t->next;
			write_unlock_bh(&ipgre_lock);
			break;
		}
	}
}
static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
		struct ip_tunnel_parm *parms, int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	struct ip_tunnel *t, **tp, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
			if (key == t->parms.i_key)
				return t;
		}
	}
	if (!create)
		return NULL;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		sprintf(name, "gre%%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (strchr(name, '%')) {
		if (dev_alloc_name(dev, name) < 0)
			goto failed_free;
	}

	dev->init = ipgre_tunnel_init;
	nt = netdev_priv(dev);
	nt->parms = *parms;

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco's "wise men" put the GRE key in the third word
   of the GRE header. That makes it impossible to maintain even soft
   state for keyed GRE tunnels with checksums enabled. Tell them "thank you".

   Well, I wonder: rfc1812 was written by a Cisco employee, so why the
   hell do these guys break the standards established by themselves?
 */

	struct iphdr *iph = (struct iphdr *)skb->data;
	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;

		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	read_lock(&ipgre_lock);
	t = ipgre_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr,
				flags & GRE_KEY ?
				*(((__be32 *)p) + (grehlen>>2) - 1) : 0);
	if (t == NULL || t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		goto out;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	read_unlock(&ipgre_lock);
	return;
}
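/*
 * For reference (RFC 1701 layout; not spelled out in this file): the GRE
 * header that follows the outer IP header is
 *
 *	__be16	flags;			GRE_CSUM | GRE_KEY | GRE_SEQ | ...
 *	__be16	protocol;		inner EtherType, e.g. ETH_P_IP
 *	[__be16 csum; __be16 pad;]	present if GRE_CSUM
 *	[__be32 key;]			present if GRE_KEY
 *	[__be32 seq;]			present if GRE_SEQ
 *
 * so each option present adds 4 bytes, which is what the grehlen
 * accounting in ipgre_err() above relies on.
 */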
static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(iph->tos)) {
		if (skb->protocol == htons(ETH_P_IP)) {
			IP_ECN_set_ce(ip_hdr(skb));
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
	}
}
static inline u8
ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;
	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}
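/*
 * Summary of the two ECN helpers above: on decapsulation, a CE
 * (congestion experienced) mark on the outer header is copied to the
 * inner IPv4 or IPv6 header, and on encapsulation INET_ECN_encapsulate()
 * combines the configured outer tos with the inner header's ECN bits,
 * so congestion marks survive the tunnel in both directions.
 */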
static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	read_lock(&ipgre_lock);
	if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev),
					iph->saddr, iph->daddr, key)) != NULL) {
		struct net_device_stats *stats = &tunnel->dev->stats;

		secpath_reset(skb);

		skb->protocol = *(__be16 *)(h + 2);
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
		 */
		if (flags == 0 &&
		    skb->protocol == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_reset_network_header(skb);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (skb->rtable->fl.iif == 0)
				goto drop;
			stats->multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			stats->rx_crc_errors++;
			stats->rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				stats->rx_fifo_errors++;
				stats->rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);
		ipgre_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipgre_lock);
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	read_unlock(&ipgre_lock);
drop_nolock:
	kfree_skb(skb);
	return 0;
}
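/*
 * Note on the sequence check in ipgre_rcv(): the
 * (s32)(seqno - tunnel->i_seqno) < 0 comparison is wraparound-safe serial
 * arithmetic. E.g. (illustration) with i_seqno = 0xffffffff, an arriving
 * seqno of 0 gives (s32)(0 - 0xffffffff) = 1 > 0 and is accepted, while a
 * stale seqno of 0xfffffff0 yields a negative difference and is dropped
 * as out of order.
 */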
static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->dev->stats;
	struct iphdr  *old_iph = ip_hdr(skb);
	struct iphdr  *tiph;
	u8     tos;
	__be16 df;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    gre_hlen;
	__be32 dst;
	int    mtu;

	if (tunnel->recursion++) {
		stats->collisions++;
		goto tx_error;
	}

	if (dev->header_ops) {
		gre_hlen = 0;
		tiph = (struct iphdr *)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb->dst == NULL) {
			stats->tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb->rtable;
			if ((dst = rt->rt_gateway) == 0)
				goto tx_error_icmp;
		}
#ifdef CONFIG_IPV6
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			int addr_type;
			struct neighbour *neigh = skb->dst->neighbour;

			if (neigh == NULL)
				goto tx_error;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				goto tx_error_icmp;

			dst = addr6->s6_addr32[3];
		}
#endif
		else
			goto tx_error;
	}

	tos = tiph->tos;
	if (tos == 1) {
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
	}

	{
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .proto = IPPROTO_GRE };
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			stats->tx_carrier_errors++;
			goto tx_error;
		}
	}
	tdev = rt->u.dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		stats->collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
	else
		mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if ((old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#ifdef CONFIG_IPV6
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb->dst;

		if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				skb->dst->metrics[RTAX_MTU-1] = mtu;
			}
		}

		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	if (tunnel->err_count > 0) {
		if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			tunnel->recursion--;
			return 0;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb->transport_header = skb->network_header;
	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/*
	 *	Push down and install the IPIP header.
	 */

	iph			= ip_hdr(skb);
	iph->version		= 4;
	iph->ihl		= sizeof(struct iphdr) >> 2;
	iph->frag_off		= df;
	iph->protocol		= IPPROTO_GRE;
	iph->tos		= ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr		= rt->rt_dst;
	iph->saddr		= rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#ifdef CONFIG_IPV6
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
	}

	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1),
							  skb->len - sizeof(struct iphdr));
		}
	}

	nf_reset(skb);

	IPTUNNEL_XMIT();
	tunnel->recursion--;
	return 0;

tx_error_icmp:
	dst_link_failure(skb);

tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
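/*
 * Worked example for the pmtu logic above (illustrative numbers): a plain
 * GRE tunnel has tunnel->hlen = 20 (outer IP) + 4 (GRE) = 24, so over a
 * 1500-byte path mtu becomes 1500 - 24 = 1476. A 1500-byte inner IPv4
 * packet with DF set then triggers ICMP_FRAG_NEEDED with mtu 1476,
 * exactly the "pruning" described in the comment at the top of this file.
 */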
static void ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and hard_header_len */

	if (iph->daddr) {
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;
		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
			tdev = rt->u.dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->hard_header_len = hlen + addend;
	dev->mtu = mtu - addend;
	tunnel->hlen = addend;
}
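/*
 * E.g. (illustration): with checksums, a key and sequence numbers all
 * enabled, addend = 20 + 4 + 4 + 4 + 4 = 36, so a tunnel bound to a
 * 1500-byte Ethernet device advertises dev->mtu = 1464 and reserves 36
 * bytes of headroom on top of the underlying device's header length.
 */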
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				ipgre_tunnel_unlink(ign, t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
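/*
 * Illustration only: roughly how userspace (e.g. the "ip tunnel" command)
 * reaches the ioctl above. The exact iproute2 code differs; this is a
 * hypothetical minimal sketch, assuming the usual <linux/if_tunnel.h>
 * and <net/if.h> userspace headers and hypothetical local_addr /
 * remote_addr variables.
 */
#if 0
	struct ip_tunnel_parm p = { };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strcpy(p.name, "mygre");		/* name of the tunnel to create */
	p.iph.version = 4;
	p.iph.ihl = 5;
	p.iph.protocol = IPPROTO_GRE;
	p.iph.saddr = local_addr;		/* hypothetical variables */
	p.iph.daddr = remote_addr;
	p.iph.ttl = 64;				/* non-zero ttl => kernel forces DF */

	strcpy(ifr.ifr_name, "gre0");		/* requests go via the fallback device */
	ifr.ifr_ifru.ifru_data = (void *)&p;
	ioctl(fd, SIOCADDTUNNEL, &ifr);
#endif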
static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
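/*
 * Note on the bounds above: 68 is the minimum IPv4 MTU, and 0xFFF8
 * (65528, the largest 8-byte-aligned 16-bit value) keeps
 * new_mtu + tunnel->hlen within the outer header's 16-bit tot_len.
 */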
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp fec0:6666:6666::193.233.7.65
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0]		= t->parms.o_flags;
	p[1]		= htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);

	if (daddr) {
		memcpy(&iph->daddr, daddr, 4);
		return t->hlen;
	}
	if (iph->daddr && !ipv4_is_multicast(iph->daddr))
		return t->hlen;

	return -t->hlen;
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
	memcpy(haddr, &iph->saddr, 4);
	return 4;
}
static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi fl = { .oif = t->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = t->parms.iph.daddr,
						.saddr = t->parms.iph.saddr,
						.tos = RT_TOS(t->parms.iph.tos) } },
				    .proto = IPPROTO_GRE };
		struct rtable *rt;
		if (ip_route_output_key(dev_net(dev), &rt, &fl))
			return -EADDRNOTAVAIL;
		dev = rt->u.dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev) {
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
			in_dev_put(in_dev);
		}
	}
	return 0;
}

#endif
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->uninit		= ipgre_tunnel_uninit;
	dev->destructor		= free_netdev;
	dev->hard_start_xmit	= ipgre_tunnel_xmit;
	dev->do_ioctl		= ipgre_tunnel_ioctl;
	dev->change_mtu		= ipgre_tunnel_change_mtu;

	dev->type		= ARPHRD_IPGRE;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	ipgre_tunnel_bind_dev(dev);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
			dev->open = ipgre_open;
			dev->stop = ipgre_close;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	return 0;
}
static int ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	dev_hold(dev);
	ign->tunnels_wc[0]	= tunnel;
	return 0;
}
static struct net_protocol ipgre_protocol = {
	.handler	= ipgre_rcv,
	.err_handler	= ipgre_err,
};
static void ipgre_destroy_tunnels(struct ipgre_net *ign)
{
	int prio;

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;
			while ((t = ign->tunnels[prio][h]) != NULL)
				unregister_netdevice(t->dev);
		}
	}
}
static int ipgre_init_net(struct net *net)
{
	int err;
	struct ipgre_net *ign;

	err = -ENOMEM;
	ign = kzalloc(sizeof(struct ipgre_net), GFP_KERNEL);
	if (ign == NULL)
		goto err_alloc;

	err = net_assign_generic(net, ipgre_net_id, ign);
	if (err < 0)
		goto err_assign;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					  ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}

	ign->fb_tunnel_dev->init = ipgre_fb_tunnel_init;
	dev_net_set(ign->fb_tunnel_dev, net);

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	return 0;

err_reg_dev:
	free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
err_assign:
	kfree(ign);
err_alloc:
	return err;
}
static void ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign;

	ign = net_generic(net, ipgre_net_id);
	rtnl_lock();
	ipgre_destroy_tunnels(ign);
	rtnl_unlock();
	kfree(ign);
}
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
};
/*
 *	And now the modules code and kernel interface.
 */
static int __init ipgre_init(void)
{
	int err;

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		return -EAGAIN;
	}

	err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
	if (err < 0)
		inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);

	return err;
}
static void __exit ipgre_fini(void)
{
	if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");

	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
}
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");