/* net/ipv4/ipip.c (davej-history.git, pre-2.3.4) */
/*
 *	Linux NET3:	IP/IP protocol decoder.
 *
 *	Version: $Id: ipip.c,v 1.26 1999/03/25 10:04:32 davem Exp $
 *
 *	Authors:
 *		Sam Lantinga (slouken@cs.ucdavis.edu)  02/01/95
 *
 *	Fixes:
 *		Alan Cox	:	Merged and made usable non modular (it's so tiny it's silly as
 *					a module taking up 2 pages).
 *		Alan Cox	:	Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph)
 *					to keep ip_forward happy.
 *		Alan Cox	:	More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
 *		Kai Schulte	:	Fixed #defines for IP_FIREWALL->FIREWALL
 *		David Woodhouse	:	Perform some basic ICMP handling.
 *					IPIP Routing without decapsulation.
 *		Carlos Picoto	:	GRE over IP support
 *		Alexey Kuznetsov:	Reworked. Really, now it is a truncated version of ipv4/ip_gre.c.
 *					I do not want to merge them together.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */
/* tunnel.c: an IP tunnel driver

	The purpose of this driver is to provide an IP tunnel through
	which you can tunnel network traffic transparently across subnets.

	This was written by looking at Nick Holloway's dummy driver
	Thanks for the great code!

		-Sam Lantinga	(slouken@cs.ucdavis.edu)  02/01/95

	Minor tweaks:
		Cleaned up the code a little and added some pre-1.3.0 tweaks.
		dev->hard_header/hard_header_len changed to use no headers.
		Comments/bracketing tweaked.
		Made the tunnels use dev->name not tunnel: when error reporting.
		Added tx_dropped stat

		-Alan Cox	(Alan.Cox@linux.org)	21 March 95

	Reworked:
		Changed to tunnel to destination gateway in addition to the
		tunnel's pointopoint address
		Almost completely rewritten
		Note:  There is currently no firewall or ICMP handling done.

		-Sam Lantinga	(slouken@cs.ucdavis.edu) 02/13/96

*/
/* Things I wish I had known when writing the tunnel driver:

	When the tunnel_xmit() function is called, the skb contains the
	packet to be sent (plus a great deal of extra info), and dev
	contains the tunnel device that _we_ are.

	When we are passed a packet, we are expected to fill in the
	source address with our source IP address.

	What is the proper way to allocate, copy and free a buffer?
	After you allocate it, it is a "0 length" chunk of memory
	starting at zero.  If you want to add headers to the buffer
	later, you'll have to call "skb_reserve(skb, amount)" with
	the amount of memory you want reserved.  Then, you call
	"skb_put(skb, amount)" with the amount of space you want in
	the buffer.  skb_put() returns a pointer to the top (#0) of
	that buffer.  skb->len is set to the amount of space you have
	"allocated" with skb_put().  You can then write up to skb->len
	bytes to that buffer.  If you need more, you can call skb_put()
	again with the additional amount of space you need.  You can
	find out how much more space you can allocate by calling
	"skb_tailroom(skb)".
	Now, to add header space, call "skb_push(skb, header_len)".
	This creates space at the beginning of the buffer and returns
	a pointer to this new space.  If later you need to strip a
	header from a buffer, call "skb_pull(skb, header_len)".
	skb_headroom() will return how much space is left at the top
	of the buffer (before the main data).  Remember, this headroom
	space must be reserved before the skb_put() function is called.
*/
/*
   This version of net/ipv4/ipip.c is cloned from net/ipv4/ip_gre.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
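
/*
 * Illustrative sketch (not part of the original driver) of the sk_buff
 * calls described in the header comment above.  The function name and the
 * 20/100 byte sizes are arbitrary examples; the block is kept under #if 0
 * so it does not affect the build.
 */
#if 0
static struct sk_buff *example_skb_usage(void)
{
        struct sk_buff *skb;
        unsigned char *data;

        /* A freshly allocated skb is a "0 length" buffer. */
        skb = alloc_skb(20 + 100, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve headroom first, before any data is added. */
        skb_reserve(skb, 20);

        /* Claim 100 bytes of payload; skb->len becomes 100. */
        data = skb_put(skb, 100);
        data[0] = 0;            /* the caller may now fill up to skb->len bytes */

        /* Prepend a 20 byte header into the reserved headroom. */
        skb_push(skb, 20);

        /* skb_headroom()/skb_tailroom() report the remaining slack. */
        return skb;
}
#endif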
#define HASH_SIZE  16
#define HASH(addr) ((addr^(addr>>4))&0xF)

static int ipip_fb_tunnel_init(struct device *dev);
static int ipip_tunnel_init(struct device *dev);

static struct device ipip_fb_tunnel_dev = {
        NULL, 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NULL, ipip_fb_tunnel_init,
};

static struct ip_tunnel ipip_fb_tunnel = {
        NULL, &ipip_fb_tunnel_dev, {0, }, 0, 0, 0, 0, 0, 0, 0, {"tunl0", }
};

static struct ip_tunnel *tunnels_r_l[HASH_SIZE];
static struct ip_tunnel *tunnels_r[HASH_SIZE];
static struct ip_tunnel *tunnels_l[HASH_SIZE];
static struct ip_tunnel *tunnels_wc[1];
static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunnels_r_l };
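
/*
 * Tunnels are kept in four tables according to which endpoints they have
 * configured: tunnels_wc holds the single wildcard (fallback) tunnel,
 * tunnels_l is hashed on the local address only, tunnels_r on the remote
 * address only, and tunnels_r_l on both.  HASH() folds an address into one
 * of HASH_SIZE buckets; ipip_bucket() below selects the table from the
 * prio bits (remote -> 2, local -> 1).
 */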
static struct ip_tunnel * ipip_tunnel_lookup(u32 remote, u32 local)
{
        unsigned h0 = HASH(remote);
        unsigned h1 = HASH(local);
        struct ip_tunnel *t;

        for (t = tunnels_r_l[h0^h1]; t; t = t->next) {
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
                        return t;
        }
        for (t = tunnels_r[h0]; t; t = t->next) {
                if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
                        return t;
        }
        for (t = tunnels_l[h1]; t; t = t->next) {
                if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
                        return t;
        }
        if ((t = tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP))
                return t;
        return NULL;
}
static struct ip_tunnel **ipip_bucket(struct ip_tunnel *t)
{
        u32 remote = t->parms.iph.daddr;
        u32 local = t->parms.iph.saddr;
        unsigned h = 0;
        int prio = 0;

        if (remote) {
                prio |= 2;
                h ^= HASH(remote);
        }
        if (local) {
                prio |= 1;
                h ^= HASH(local);
        }
        return &tunnels[prio][h];
}
static void ipip_tunnel_unlink(struct ip_tunnel *t)
{
        struct ip_tunnel **tp;

        for (tp = ipip_bucket(t); *tp; tp = &(*tp)->next) {
                if (t == *tp) {
                        *tp = t->next;
                        synchronize_bh();
                        break;
                }
        }
}
static void ipip_tunnel_link(struct ip_tunnel *t)
{
        struct ip_tunnel **tp = ipip_bucket(t);

        t->next = *tp;
        wmb();
        *tp = t;
}
struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create)
{
        u32 remote = parms->iph.daddr;
        u32 local = parms->iph.saddr;
        struct ip_tunnel *t, **tp, *nt;
        struct device *dev;
        unsigned h = 0;
        int prio = 0;

        if (remote) {
                prio |= 2;
                h ^= HASH(remote);
        }
        if (local) {
                prio |= 1;
                h ^= HASH(local);
        }
        for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
                if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
                        return t;
        }
        if (!create)
                return NULL;

        MOD_INC_USE_COUNT;
        dev = kmalloc(sizeof(*dev) + sizeof(*t), GFP_KERNEL);
        if (dev == NULL) {
                MOD_DEC_USE_COUNT;
                return NULL;
        }
        memset(dev, 0, sizeof(*dev) + sizeof(*t));
        dev->priv = (void*)(dev+1);
        nt = (struct ip_tunnel*)dev->priv;
        nt->dev = dev;
        dev->name = nt->parms.name;
        dev->init = ipip_tunnel_init;
        memcpy(&nt->parms, parms, sizeof(*parms));
        if (dev->name[0] == 0) {
                int i;
                for (i=1; i<100; i++) {
                        sprintf(dev->name, "tunl%d", i);
                        if (dev_get(dev->name) == NULL)
                                break;
                }
                if (i==100)
                        goto failed;
                memcpy(parms->name, dev->name, IFNAMSIZ);
        }
        if (register_netdevice(dev) < 0)
                goto failed;

        ipip_tunnel_link(nt);
        /* Do not decrement MOD_USE_COUNT here. */
        return nt;

failed:
        kfree(dev);
        MOD_DEC_USE_COUNT;
        return NULL;
}
static void ipip_tunnel_destroy(struct device *dev)
{
        if (dev == &ipip_fb_tunnel_dev) {
                tunnels_wc[0] = NULL;
                synchronize_bh();
        } else {
                ipip_tunnel_unlink((struct ip_tunnel*)dev->priv);
                kfree(dev);
                MOD_DEC_USE_COUNT;
        }
}
void ipip_err(struct sk_buff *skb, unsigned char *dp, int len)
{
#ifndef I_WISH_WORLD_WERE_PERFECT

/* It is not :-( All the routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.
 */
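        /* In this branch the error is only recorded: err_count/err_time are
           updated below, and ipip_tunnel_xmit() later reports dst_link_failure()
           to local senders while the counter drains within IPTUNNEL_ERR_TIMEO.
         */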
        struct iphdr *iph = (struct iphdr*)dp;
        int type = skb->h.icmph->type;
        int code = skb->h.icmph->code;
        struct ip_tunnel *t;

        if (len < sizeof(struct iphdr))
                return;

        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
                return;

        case ICMP_DEST_UNREACH:
                switch (code) {
                case ICMP_SR_FAILED:
                case ICMP_PORT_UNREACH:
                        /* Impossible event. */
                        return;
                case ICMP_FRAG_NEEDED:
                        /* Soft state for pmtu is maintained by IP core. */
                        return;
                default:
                        /* All others are translated to HOST_UNREACH.
                           rfc2003 contains "deep thoughts" about NET_UNREACH,
                           I believe they are just ether pollution. --ANK
                         */
                        break;
                }
                break;
        case ICMP_TIME_EXCEEDED:
                if (code != ICMP_EXC_TTL)
                        return;
                break;
        }

        t = ipip_tunnel_lookup(iph->daddr, iph->saddr);
        if (t == NULL || t->parms.iph.daddr == 0)
                return;
        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
                return;

        if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
        return;
#else
        struct iphdr *iph = (struct iphdr*)dp;
        int hlen = iph->ihl<<2;
        struct iphdr *eiph;
        int type = skb->h.icmph->type;
        int code = skb->h.icmph->code;
        int rel_type = 0;
        int rel_code = 0;
        int rel_info = 0;
        struct sk_buff *skb2;
        struct rtable *rt;

        if (len < hlen + sizeof(struct iphdr))
                return;
        eiph = (struct iphdr*)(dp + hlen);

        switch (type) {
        default:
                return;
        case ICMP_PARAMETERPROB:
                if (skb->h.icmph->un.gateway < hlen)
                        return;

                /* So... This guy found something strange INSIDE the
                   encapsulated packet. Well, he is a fool, but what can we do?
                 */
                rel_type = ICMP_PARAMETERPROB;
                rel_info = skb->h.icmph->un.gateway - hlen;
                break;

        case ICMP_DEST_UNREACH:
                switch (code) {
                case ICMP_SR_FAILED:
                case ICMP_PORT_UNREACH:
                        /* Impossible event. */
                        return;
                case ICMP_FRAG_NEEDED:
                        /* And it is the only really necessary thing :-) */
                        rel_info = ntohs(skb->h.icmph->un.frag.mtu);
                        if (rel_info < hlen+68)
                                return;
                        rel_info -= hlen;
                        /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
                        if (rel_info > ntohs(eiph->tot_len))
                                return;
                        break;
                default:
                        /* All others are translated to HOST_UNREACH.
                           rfc2003 contains "deep thoughts" about NET_UNREACH,
                           I believe it is just ether pollution. --ANK
                         */
                        rel_type = ICMP_DEST_UNREACH;
                        rel_code = ICMP_HOST_UNREACH;
                        break;
                }
                break;
        case ICMP_TIME_EXCEEDED:
                if (code != ICMP_EXC_TTL)
                        return;
                break;
        }

        /* Prepare fake skb to feed it to icmp_send */
        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (skb2 == NULL)
                return;
        dst_release(skb2->dst);
        skb2->dst = NULL;
        skb_pull(skb2, skb->data - (u8*)eiph);
        skb2->nh.raw = skb2->data;

        /* Try to guess incoming interface */
        if (ip_route_output(&rt, eiph->saddr, 0, RT_TOS(eiph->tos), 0)) {
                kfree_skb(skb2);
                return;
        }
        skb2->dev = rt->u.dst.dev;

        /* route "incoming" packet */
        if (rt->rt_flags&RTCF_LOCAL) {
                ip_rt_put(rt);
                rt = NULL;
                if (ip_route_output(&rt, eiph->daddr, eiph->saddr, eiph->tos, 0) ||
                    rt->u.dst.dev->type != ARPHRD_IPGRE) {
                        ip_rt_put(rt);
                        kfree_skb(skb2);
                        return;
                }
        } else {
                ip_rt_put(rt);
                if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
                    skb2->dst->dev->type != ARPHRD_IPGRE) {
                        kfree_skb(skb2);
                        return;
                }
        }

        /* change mtu on this route */
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                if (rel_info > skb2->dst->pmtu) {
                        kfree_skb(skb2);
                        return;
                }
                skb2->dst->pmtu = rel_info;
                rel_info = htonl(rel_info);
        } else if (type == ICMP_TIME_EXCEEDED) {
                struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv;
                if (t->parms.iph.ttl) {
                        rel_type = ICMP_DEST_UNREACH;
                        rel_code = ICMP_HOST_UNREACH;
                }
        }

        icmp_send(skb2, rel_type, rel_code, rel_info);
        kfree_skb(skb2);
        return;
#endif
}
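
/*
 * Decapsulation path.  On entry the IP layer has set skb->h.raw just past
 * the outer header, so the skb_pull() below strips the outer IP header;
 * the inner datagram is then re-labelled as the packet itself and fed back
 * through netif_rx() on the matching tunnel device.  Packets that match no
 * tunnel are answered with an ICMP protocol-unreachable.
 */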
int ipip_rcv(struct sk_buff *skb, unsigned short len)
{
        struct iphdr *iph;
        struct ip_tunnel *tunnel;

        iph = skb->nh.iph;
        skb->mac.raw = skb->nh.raw;
        skb->nh.raw = skb_pull(skb, skb->h.raw - skb->data);
        memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
        skb->protocol = __constant_htons(ETH_P_IP);
        skb->ip_summed = 0;
        skb->pkt_type = PACKET_HOST;

        if ((tunnel = ipip_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) {
                tunnel->stat.rx_packets++;
                tunnel->stat.rx_bytes += skb->len;
                skb->dev = tunnel->dev;
                dst_release(skb->dst);
                skb->dst = NULL;
                netif_rx(skb);
                return 0;
        }

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
        kfree_skb(skb);
        return 0;
}
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */

static int ipip_tunnel_xmit(struct sk_buff *skb, struct device *dev)
{
        struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
        struct net_device_stats *stats = &tunnel->stat;
        struct iphdr *tiph = &tunnel->parms.iph;
        u8 tos = tunnel->parms.iph.tos;
        u16 df = tiph->frag_off;
        struct rtable *rt;                      /* Route to the other host */
        struct device *tdev;                    /* Device to other host */
        struct iphdr *old_iph = skb->nh.iph;
        struct iphdr *iph;                      /* Our new IP header */
        int max_headroom;                       /* The extra header space needed */
        u32 dst = tiph->daddr;
        int mtu;

        if (tunnel->recursion++) {
                tunnel->stat.collisions++;
                goto tx_error;
        }

        if (skb->protocol != __constant_htons(ETH_P_IP))
                goto tx_error;

        if (tos&1)
                tos = old_iph->tos;

        if (!dst) {
                /* NBMA tunnel */
                if ((rt = (struct rtable*)skb->dst) == NULL) {
                        tunnel->stat.tx_fifo_errors++;
                        goto tx_error;
                }
                if ((dst = rt->rt_gateway) == 0)
                        goto tx_error_icmp;
        }

        if (ip_route_output(&rt, dst, tiph->saddr, RT_TOS(tos), tunnel->parms.link)) {
                tunnel->stat.tx_carrier_errors++;
                goto tx_error_icmp;
        }
        tdev = rt->u.dst.dev;

        if (tdev == dev) {
                ip_rt_put(rt);
                tunnel->stat.collisions++;
                goto tx_error;
        }

        mtu = rt->u.dst.pmtu - sizeof(struct iphdr);
        if (mtu < 68) {
                tunnel->stat.collisions++;
                ip_rt_put(rt);
                goto tx_error;
        }
        if (skb->dst && mtu < skb->dst->pmtu)
                skb->dst->pmtu = mtu;

        df |= (old_iph->frag_off&__constant_htons(IP_DF));

        if ((old_iph->frag_off&__constant_htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                ip_rt_put(rt);
                goto tx_error;
        }

        if (tunnel->err_count > 0) {
                if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
                        tunnel->err_count--;
                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
        }

        skb->h.raw = skb->nh.raw;

        /*
         * Okay, now see if we can stuff it in the buffer as-is.
         */
        max_headroom = (((tdev->hard_header_len+15)&~15)+sizeof(struct iphdr));
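        /* The headroom computed above is the outgoing device's link-layer
           header length rounded up to a 16 byte boundary, plus the new outer
           IP header that skb_push() adds further down.
         */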
        if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
                struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
                if (!new_skb) {
                        ip_rt_put(rt);
                        stats->tx_dropped++;
                        dev_kfree_skb(skb);
                        tunnel->recursion--;
                        return 0;
                }
                if (skb->sk)
                        skb_set_owner_w(new_skb, skb->sk);
                dev_kfree_skb(skb);
                skb = new_skb;
        }

        skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        dst_release(skb->dst);
        skb->dst = &rt->u.dst;

        /*
         *	Push down and install the IPIP header.
         */

        iph = skb->nh.iph;
        iph->version = 4;
        iph->ihl = sizeof(struct iphdr)>>2;
        iph->frag_off = df;
        iph->protocol = IPPROTO_IPIP;
        iph->tos = tos;
        iph->daddr = rt->rt_dst;
        iph->saddr = rt->rt_src;

        if ((iph->ttl = tiph->ttl) == 0)
                iph->ttl = old_iph->ttl;

        iph->tot_len = htons(skb->len);
        iph->id = htons(ip_id_count++);
        ip_send_check(iph);

        stats->tx_bytes += skb->len;
        stats->tx_packets++;
        ip_send(skb);
        tunnel->recursion--;
        return 0;

tx_error_icmp:
        dst_link_failure(skb);
tx_error:
        stats->tx_errors++;
        dev_kfree_skb(skb);
        tunnel->recursion--;
        return 0;
}
static int
ipip_tunnel_ioctl (struct device *dev, struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct ip_tunnel_parm p;
        struct ip_tunnel *t;

        MOD_INC_USE_COUNT;

        switch (cmd) {
        case SIOCGETTUNNEL:
                t = NULL;
                if (dev == &ipip_fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
                                err = -EFAULT;
                                break;
                        }
                        t = ipip_tunnel_locate(&p, 0);
                }
                if (t == NULL)
                        t = (struct ip_tunnel*)dev->priv;
                memcpy(&p, &t->parms, sizeof(p));
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                        err = -EFAULT;
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        goto done;

                err = -EFAULT;
                if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                        goto done;

                err = -EINVAL;
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
                    p.iph.ihl != 5 || (p.iph.frag_off&__constant_htons(~IP_DF)))
                        goto done;
                if (p.iph.ttl)
                        p.iph.frag_off |= __constant_htons(IP_DF);

                t = ipip_tunnel_locate(&p, cmd == SIOCADDTUNNEL);

                if (dev != &ipip_fb_tunnel_dev && cmd == SIOCCHGTUNNEL &&
                    t != &ipip_fb_tunnel) {
                        if (t != NULL) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
                                    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
                                        err = -EINVAL;
                                        break;
                                }
                                t = (struct ip_tunnel*)dev->priv;
                                start_bh_atomic();
                                ipip_tunnel_unlink(t);
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                memcpy(dev->dev_addr, &p.iph.saddr, 4);
                                memcpy(dev->broadcast, &p.iph.daddr, 4);
                                ipip_tunnel_link(t);
                                end_bh_atomic();
                                netdev_state_change(dev);
                        }
                }

                if (t) {
                        err = 0;
                        if (cmd == SIOCCHGTUNNEL) {
                                t->parms.iph.ttl = p.iph.ttl;
                                t->parms.iph.tos = p.iph.tos;
                                t->parms.iph.frag_off = p.iph.frag_off;
                        }
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
                                err = -EFAULT;
                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        goto done;

                if (dev == &ipip_fb_tunnel_dev) {
                        err = -EFAULT;
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                                goto done;
                        err = -ENOENT;
                        if ((t = ipip_tunnel_locate(&p, 0)) == NULL)
                                goto done;
                        err = -EPERM;
                        if (t == &ipip_fb_tunnel)
                                goto done;
                }
                err = unregister_netdevice(dev);
                break;

        default:
                err = -EINVAL;
        }

done:
        MOD_DEC_USE_COUNT;
        return err;
}
static struct net_device_stats *ipip_tunnel_get_stats(struct device *dev)
{
        return &(((struct ip_tunnel*)dev->priv)->stat);
}

static int ipip_tunnel_change_mtu(struct device *dev, int new_mtu)
{
        if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr))
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}
static void ipip_tunnel_init_gen(struct device *dev)
{
        struct ip_tunnel *t = (struct ip_tunnel*)dev->priv;

        dev->destructor = ipip_tunnel_destroy;
        dev->hard_start_xmit = ipip_tunnel_xmit;
        dev->get_stats = ipip_tunnel_get_stats;
        dev->do_ioctl = ipip_tunnel_ioctl;
        dev->change_mtu = ipip_tunnel_change_mtu;

        dev_init_buffers(dev);

        dev->type = ARPHRD_TUNNEL;
        dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
        dev->mtu = 1500 - sizeof(struct iphdr);
        dev->flags = IFF_NOARP;
        dev->iflink = 0;
        dev->addr_len = 4;
        memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
}
static int ipip_tunnel_init(struct device *dev)
{
        struct device *tdev = NULL;
        struct ip_tunnel *tunnel;
        struct iphdr *iph;

        tunnel = (struct ip_tunnel*)dev->priv;
        iph = &tunnel->parms.iph;

        ipip_tunnel_init_gen(dev);

        if (iph->daddr) {
                struct rtable *rt;
                if (!ip_route_output(&rt, iph->daddr, iph->saddr, RT_TOS(iph->tos), tunnel->parms.link)) {
                        tdev = rt->u.dst.dev;
                        ip_rt_put(rt);
                }
                dev->flags |= IFF_POINTOPOINT;
        }

        if (!tdev && tunnel->parms.link)
                tdev = dev_get_by_index(tunnel->parms.link);

        if (tdev) {
                dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
                dev->mtu = tdev->mtu - sizeof(struct iphdr);
        }
        dev->iflink = tunnel->parms.link;

        return 0;
}
#ifdef MODULE
static int ipip_fb_tunnel_open(struct device *dev)
{
        MOD_INC_USE_COUNT;
        return 0;
}

static int ipip_fb_tunnel_close(struct device *dev)
{
        MOD_DEC_USE_COUNT;
        return 0;
}
#endif

__initfunc(int ipip_fb_tunnel_init(struct device *dev))
{
        struct iphdr *iph;

        ipip_tunnel_init_gen(dev);
#ifdef MODULE
        dev->open = ipip_fb_tunnel_open;
        dev->stop = ipip_fb_tunnel_close;
#endif

        iph = &ipip_fb_tunnel.parms.iph;
        iph->version = 4;
        iph->protocol = IPPROTO_IPIP;
        iph->ihl = 5;

        tunnels_wc[0] = &ipip_fb_tunnel;
        return 0;
}
static struct inet_protocol ipip_protocol = {
        ipip_rcv,       /* IPIP handler          */
        ipip_err,       /* TUNNEL error control  */
        0,              /* next                  */
        IPPROTO_IPIP,   /* protocol ID           */
        0,              /* copy                  */
        NULL,           /* data                  */
        "IPIP"          /* name                  */
};
#ifdef MODULE
int init_module(void)
#else
__initfunc(int ipip_init(void))
#endif
{
        printk(KERN_INFO "IPv4 over IPv4 tunneling driver\n");

        ipip_fb_tunnel_dev.priv = (void*)&ipip_fb_tunnel;
        ipip_fb_tunnel_dev.name = ipip_fb_tunnel.parms.name;
#ifdef MODULE
        register_netdev(&ipip_fb_tunnel_dev);
#else
        register_netdevice(&ipip_fb_tunnel_dev);
#endif

        inet_add_protocol(&ipip_protocol);
        return 0;
}
#ifdef MODULE

void cleanup_module(void)
{
        if ( inet_del_protocol(&ipip_protocol) < 0 )
                printk(KERN_INFO "ipip close: can't remove protocol\n");

        unregister_netdevice(&ipip_fb_tunnel_dev);
}

#endif