/*
 *	Linux NET3: IP/IP protocol decoder modified to support
 *		    virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c.
 *
 * For comments look at net/ipv4/ip_gre.c --ANK
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr ^ ((__force u32)addr >> 4)) & (HASH_SIZE - 1))

static struct rtnl_link_ops vti_link_ops __read_mostly;

static int vti_net_id __read_mostly;
struct vti_net {
	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_wc[1];
	struct ip_tunnel __rcu **tunnels[4];

	struct net_device *fb_tunnel_dev;
};
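
/* Tunnels live in four hash-table classes, indexed by which endpoint
 * addresses are set: tunnels_wc (neither), tunnels_l (local only),
 * tunnels_r (remote only) and tunnels_r_l (both).  tunnels[] maps the
 * prio value computed in __vti_bucket() (0-3) onto those tables, and
 * vti_tunnel_lookup() probes them most-specific first.
 */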
static int vti_fb_tunnel_init(struct net_device *dev);
static int vti_tunnel_init(struct net_device *dev);
static void vti_tunnel_setup(struct net_device *dev);
static void vti_dev_free(struct net_device *dev);
static int vti_tunnel_bind_dev(struct net_device *dev);
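
/* Transmit helper: hand the skb to dst_output() and account the result,
 * updating the u64 per-cpu counters (stats1) under their syncp on
 * success, or the plain error counters (stats2) on failure.
 */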
#define VTI_XMIT(stats1, stats2) do {				\
	int err;						\
	int pkt_len = skb->len;					\
	err = dst_output(skb);					\
	if (net_xmit_eval(err) == 0) {				\
		u64_stats_update_begin(&(stats1)->syncp);	\
		(stats1)->tx_bytes += pkt_len;			\
		(stats1)->tx_packets++;				\
		u64_stats_update_end(&(stats1)->syncp);		\
	} else {						\
		(stats2)->tx_errors++;				\
		(stats2)->tx_aborted_errors++;			\
	}							\
} while (0)
static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
					   __be32 remote, __be32 local)
{
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(local);
	struct ip_tunnel *t;
	struct vti_net *ipn = net_generic(net, vti_net_id);

	for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr && (t->dev->flags & IFF_UP))
			return t;
	for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
		if (remote == t->parms.iph.daddr && (t->dev->flags & IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
		if (local == t->parms.iph.saddr && (t->dev->flags & IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
		if (t && (t->dev->flags & IFF_UP))
			return t;
	return NULL;
}
static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
					     struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	unsigned int h = 0;
	int prio = 0;

	if (remote) {
		prio |= 2;
		h ^= HASH(remote);
	}
	if (local) {
		prio |= 1;
		h ^= HASH(local);
	}
	return &ipn->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
						  struct ip_tunnel *t)
{
	return __vti_bucket(ipn, &t->parms);
}
static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = vti_bucket(ipn, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
static struct ip_tunnel *vti_tunnel_locate(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	struct ip_tunnel *t, *nt;
	struct ip_tunnel __rcu **tp;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct vti_net *ipn = net_generic(net, vti_net_id);

	for (tp = __vti_bucket(ipn, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
			return t;
	}
	if (!create)
		return NULL;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "vti%d");

	dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &vti_link_ops;

	vti_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);
	vti_tunnel_link(ipn, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
static void vti_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);

	vti_tunnel_unlink(ipn, netdev_priv(dev));
	dev_put(dev);
}
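
/* ICMP error handler: find the tunnel matching the offending header
 * carried in the ICMP payload, update the path MTU on ICMP_FRAG_NEEDED,
 * and rate-limit further error reporting via err_count/err_time.
 */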
static int vti_err(struct sk_buff *skb, u32 info)
{

	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means, that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 */
	struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH. */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		break;
	}

	err = -ENOENT;

	t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
	if (t == NULL)
		goto out;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_IPIP, 0);
		err = 0;
		goto out;
	}

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	return err;
}
/* We don't digest the packet therefore let the packet pass */
static int vti_rcv(struct sk_buff *skb)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);

	tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
	if (tunnel != NULL) {
		struct pcpu_tstats *tstats;

		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			return -1;

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		skb->mark = 0;
		secpath_reset(skb);
		skb->dev = tunnel->dev;
		return 1;
	}

	return -1;
}
/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */

static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct pcpu_tstats *tstats;
	struct iphdr  *tiph = &tunnel->parms.iph;
	u8     tos;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr  *old_iph = ip_hdr(skb);
	__be32 dst = tiph->daddr;
	struct flowi4 fl4;

	if (skb->protocol != htons(ETH_P_IP))
		goto tx_error;

	tos = old_iph->tos;

	memset(&fl4, 0, sizeof(fl4));
	flowi4_init_output(&fl4, tunnel->parms.link,
			   be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
			   RT_SCOPE_UNIVERSE,
			   IPPROTO_IPIP, 0,
			   dst, tiph->saddr, 0, 0);
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	/* if there is no transform then this tunnel is not functional.
	 * Or if the xfrm is not mode tunnel.
	 */
	if (!rt->dst.xfrm ||
	    rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
		dev->stats.tx_carrier_errors++;
		ip_rt_put(rt);
		goto tx_error_icmp;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	nf_reset(skb);
	skb->dev = skb_dst(skb)->dev;

	tstats = this_cpu_ptr(dev->tstats);
	VTI_XMIT(tstats, &dev->stats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
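
/* Resolve the underlay device for this tunnel: route towards the remote
 * endpoint (keyed like the xmit path above), or fall back to parms.link.
 * The result determines hard_header_len and the MTU reported to the stack.
 */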
static int vti_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	if (iph->daddr) {
		struct rtable *rt;
		struct flowi4 fl4;
		memset(&fl4, 0, sizeof(fl4));
		flowi4_init_output(&fl4, tunnel->parms.link,
				   be32_to_cpu(tunnel->parms.i_key),
				   RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
				   IPPROTO_IPIP, 0,
				   iph->daddr, iph->saddr, 0, 0);
		rt = ip_route_output_key(dev_net(dev), &fl4);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		dev->hard_header_len = tdev->hard_header_len +
				       sizeof(struct iphdr);
		dev->mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;
	return dev->mtu;
}
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ipn->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
					   sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = vti_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		p.i_flags |= GRE_KEY | VTI_ISVTI;
		p.o_flags |= GRE_KEY;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5)
			goto done;

		t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags & IFF_POINTOPOINT) &&
				     !p.iph.daddr) ||
				    (!(dev->flags & IFF_POINTOPOINT) &&
				     p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = netdev_priv(dev);
				vti_tunnel_unlink(ipn, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				t->parms.iph.protocol = IPPROTO_IPIP;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				vti_tunnel_link(ipn, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					vti_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			p.i_flags |= GRE_KEY | VTI_ISVTI;
			p.o_flags |= GRE_KEY;
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
					 sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ipn->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
					   sizeof(p)))
				goto done;
			err = -ENOENT;

			t = vti_tunnel_locate(net, &p, 0);
			if (t == NULL)
				goto done;
			err = -EPERM;
			if (t->dev == ipn->fb_tunnel_dev)
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 0xFFF8)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
static const struct net_device_ops vti_netdev_ops = {
	.ndo_init	= vti_tunnel_init,
	.ndo_uninit	= vti_tunnel_uninit,
	.ndo_start_xmit	= vti_tunnel_xmit,
	.ndo_do_ioctl	= vti_tunnel_ioctl,
	.ndo_change_mtu	= vti_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
};
static void vti_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}
static void vti_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &vti_netdev_ops;
	dev->destructor		= vti_dev_free;

	dev->type		= ARPHRD_TUNNEL;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
	dev->mtu		= ETH_DATA_LEN;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->features		|= NETIF_F_LLTX;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}
static int vti_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static int __net_init vti_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_IPIP;
	iph->ihl		= 5;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	dev_hold(dev);
	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
	return 0;
}
static struct xfrm_tunnel vti_handler __read_mostly = {
	.handler	= vti_rcv,
	.err_handler	= vti_err,
	.priority	= 1,
};
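
/* vti_handler is hooked into the IPsec tunnel-mode input path via
 * xfrm4_mode_tunnel_input_register() below, so vti_rcv() runs on
 * decapsulated packets and vti_err() on ICMP errors for them.
 */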
static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
{
	int prio;

	for (prio = 1; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ipn->tunnels[prio][h]);
			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}
static int __net_init vti_init_net(struct net *net)
{
	int err;
	struct vti_net *ipn = net_generic(net, vti_net_id);

	ipn->tunnels[0] = ipn->tunnels_wc;
	ipn->tunnels[1] = ipn->tunnels_l;
	ipn->tunnels[2] = ipn->tunnels_r;
	ipn->tunnels[3] = ipn->tunnels_r_l;

	ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
					  "ip_vti0",
					  vti_tunnel_setup);
	if (!ipn->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ipn->fb_tunnel_dev, net);

	err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;
	ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;

	err = register_netdev(ipn->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;
	return 0;

err_reg_dev:
	vti_dev_free(ipn->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
static void __net_exit vti_exit_net(struct net *net)
{
	struct vti_net *ipn = net_generic(net, vti_net_id);
	LIST_HEAD(list);

	rtnl_lock();
	vti_destroy_tunnels(ipn, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations vti_net_ops = {
	.init = vti_init_net,
	.exit = vti_exit_net,
	.id   = &vti_net_id,
	.size = sizeof(struct vti_net),
};
static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return 0;
}

static void vti_netlink_parms(struct nlattr *data[],
			      struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_IPIP;

	if (!data)
		return;

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

	if (data[IFLA_VTI_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);

	if (data[IFLA_VTI_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
}
static int vti_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	vti_netlink_parms(data, &nt->parms);

	if (vti_tunnel_locate(net, &nt->parms, 0))
		return -EEXIST;

	mtu = vti_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	err = register_netdevice(dev);
	if (err)
		return err;

	dev_hold(dev);
	vti_tunnel_link(ipn, nt);

	return 0;
}
static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
			  struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ipn->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	vti_netlink_parms(data, &p);

	t = vti_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		vti_tunnel_unlink(ipn, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		t->parms.o_key = p.o_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		vti_tunnel_link(ipn, t);
		netdev_state_change(dev);
	}

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = vti_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}
static size_t vti_get_size(const struct net_device *dev)
{
	return
		/* IFLA_VTI_LINK */
		nla_total_size(4) +
		/* IFLA_VTI_IKEY */
		nla_total_size(4) +
		/* IFLA_VTI_OKEY */
		nla_total_size(4) +
		/* IFLA_VTI_LOCAL */
		nla_total_size(4) +
		/* IFLA_VTI_REMOTE */
		nla_total_size(4) +
		0;
}
static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	nla_put_u32(skb, IFLA_VTI_LINK, p->link);
	nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
	nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
	nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr);
	nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr);

	return 0;
}
static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VTI_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
};
static struct rtnl_link_ops vti_link_ops __read_mostly = {
	.kind		= "vti",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= vti_tunnel_setup,
	.validate	= vti_tunnel_validate,
	.newlink	= vti_newlink,
	.changelink	= vti_changelink,
	.get_size	= vti_get_size,
	.fill_info	= vti_fill_info,
};
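
/* Illustrative userspace usage (not part of this file): with iproute2,
 * something like
 *
 *	ip link add vti1 type vti ikey 10 okey 10 \
 *		local 192.0.2.1 remote 198.51.100.1
 *	ip link set vti1 up
 *
 * creates a tunnel through this driver; traffic only flows once matching
 * tunnel-mode IPsec states and policies are in place.
 */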
static int __init vti_init(void)
{
	int err;

	pr_info("IPv4 over IPsec tunneling driver\n");

	err = register_pernet_device(&vti_net_ops);
	if (err < 0)
		return err;
	err = xfrm4_mode_tunnel_input_register(&vti_handler);
	if (err < 0) {
		unregister_pernet_device(&vti_net_ops);
		pr_info("vti init: can't register tunnel\n");
		return err;
	}

	err = rtnl_link_register(&vti_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return err;

rtnl_link_failed:
	xfrm4_mode_tunnel_input_deregister(&vti_handler);
	unregister_pernet_device(&vti_net_ops);
	return err;
}
static void __exit vti_fini(void)
{
	rtnl_link_unregister(&vti_link_ops);
	if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
		pr_info("vti close: can't deregister tunnel\n");

	unregister_pernet_device(&vti_net_ops);
}
module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");