/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   with infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two
   problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least, in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is not
   a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when encapsulated packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it is your gated that injected the
   fatal route to the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;

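/* ipgre_err() - handle an ICMP error that quotes one of our GRE packets.
 * Classifies the ICMP type/code, finds the tunnel that sent the
 * offending packet and records the error in err_time/err_count so the
 * transmit path can report it, rate limited by IPTUNNEL_ERR_TIMEO.
 */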
static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. That makes it impossible to maintain even
	   soft state for keyed GRE tunnels with enabled checksum. Tell
	   them "thank you".

	   Well, I wonder, rfc1812 was written by a Cisco employee;
	   why the hell do these idiots break standards established
	   by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

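/* gre_err() - ICMP error handler registered for the GRE protocol.
 * PMTU and redirect notifications are applied to the route here; all
 * other errors are handed to ipgre_err() for per-tunnel handling.
 */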
static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. That makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with enabled checksum. Tell
	 * them "thank you".
	 *
	 * Well, I wonder, rfc1812 was written by a Cisco employee;
	 * why the hell do these idiots break standards established
	 * by themselves???
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

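/* erspan_rcv() - receive path for ERSPAN-in-GRE packets.
 * The 10-bit ERSPAN session ID is reused as the tunnel key for lookup;
 * in collect_md mode the ERSPAN metadata is copied into the tunnel info
 * attached to the skb before it is passed on to ip_tunnel_rcv().
 */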
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);

	/* Check the base header length */
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	/* The original GRE header does not have a key field;
	 * use the ERSPAN 10-bit session ID as the key.
	 */
	tpi->key = cpu_to_be32(get_session_id(ershdr));
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags | TUNNEL_KEY,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		len = gre_hdr_len + erspan_hdr_len(ver);
		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		pkt_md = (struct erspan_metadata *)(ershdr + 1);

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

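/* __ipgre_rcv() - common GRE receive helper. Looks up the tunnel that
 * matches the outer addresses and key, strips the GRE header and
 * delivers the packet; returns PACKET_NEXT when no tunnel matches so
 * that the caller may retry against another tunnel table.
 */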
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

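/* gre_rcv() - entry point for GRE packets handed up by the IP layer.
 * Parses the GRE header, dispatches ERSPAN protocols to erspan_rcv()
 * and everything else to ipgre_rcv(); packets nobody claims are
 * answered with ICMP port unreachable.
 */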
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

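/* __gre_xmit() - push the GRE header and hand the skb to
 * ip_tunnel_xmit(), bumping the output sequence number first when
 * TUNNEL_SEQ is configured.
 */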
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}

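/* prepare_fb_xmit() - route lookup and headroom check shared by the
 * flow-based (collect_md) transmit paths. Consults the per-flow dst
 * cache when usable and expands the skb head if the outer headers will
 * not fit. Returns the route, or NULL after freeing the skb on error.
 */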
static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int min_headroom;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
	return rt;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NULL;
}

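/* gre_fb_xmit() - flow-based transmit: the outer GRE/IP headers are
 * built from the per-skb tunnel metadata rather than from the device's
 * static parameters.
 */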
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int tunnel_hlen;
	__be16 df, flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

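/* erspan_fb_xmit() - flow-based ERSPAN transmit. The mirrored frame is
 * trimmed to the device MTU when necessary and marked as truncated,
 * then wrapped in an ERSPAN v1/v2 header and a sequence-numbered GRE
 * header.
 */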
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			   __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	struct rtable *rt = NULL;
	bool truncate = false;
	struct flowi4 fl;
	int tunnel_hlen;
	int version;
	__be16 df;
	int nhoff;
	int thoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_rt;
	md = ip_tunnel_info_opts(tun_info);
	if (!md)
		goto err_free_rt;

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	if (gre_handle_offloads(skb, false))
		goto err_free_rt;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
	} else {
		goto err_free_rt;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

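/* ipgre_xmit() - ndo_start_xmit for "gre" devices. collect_md tunnels
 * take the flow-based path; devices with header_ops already carry the
 * outer IP header built by ipgre_header(), which is pulled off again
 * before encapsulation.
 */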
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 1)
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
	else if (tunnel->erspan_ver == 2)
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
	else
		goto free_skb;

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

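/* ipgre_link_update() - recompute header length, headroom and MTU after
 * the tunnel's output flags change, and refresh the GSO/LLTX feature
 * bits (sequence number generation rules out lockless transmit).
 */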
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	dev->needed_headroom = dev->needed_headroom + len;
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
}

static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct ip_tunnel_parm p;
	int err;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p.i_flags;
		t->parms.o_flags = p.o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   which is why I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
 */

static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph+1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID only has 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}

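/* ipgre_netlink_parms() - translate IFLA_GRE_* attributes into
 * ip_tunnel_parm fields and per-tunnel state (collect_md, ignore_df,
 * fwmark and the ERSPAN version/index/dir/hwid), validating ranges as
 * it goes.
 */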
static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver != 1 && t->erspan_ver != 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

bool is_gretap_dev(const struct net_device *dev)
{
	return dev->netdev_ops == &gre_tap_netdev_ops;
}
EXPORT_SYMBOL_GPL(is_gretap_dev);

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
		ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
		goto nla_put_failure;

	if (t->erspan_ver == 1) {
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
			goto nla_put_failure;
	} else if (t->erspan_ver == 2) {
		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
			goto nla_put_failure;
		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

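/* For illustration, typical iproute2 commands that exercise the three
 * link types registered below; the addresses, device names and IDs are
 * placeholders:
 *
 *   ip link add gre1 type gre local 192.0.2.1 remote 192.0.2.2 ttl 64
 *   ip link add tap1 type gretap local 192.0.2.1 remote 192.0.2.2
 *   ip link add ers1 type erspan local 192.0.2.1 remote 192.0.2.2 \
 *               seq key 100 erspan_ver 1 erspan 123
 */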
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

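/* gretap_fb_dev_create() - create a flow-based (collect_md) gretap
 * device for in-kernel users such as openvswitch, configured with the
 * largest MTU the tunnel code allows.
 */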
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");