/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/rtnetlink.h>

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#ifdef TUN_DEBUG
static int debug;

#define DBG  if (tun->debug) printk
#define DBG1 if (debug == 2) printk
#else
#define DBG(a...)
#define DBG1(a...)
#endif
/* Network device part of the driver */

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}
/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
/* Net device start xmit */
static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = (struct tun_struct *)dev->priv;

	DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->name, skb->len);

	/* Drop packet if interface is not attached */
	if (!tun->attached)
		goto drop;

	/* Queue the packet for the reader */
	if (!(tun->flags & TUN_ONE_QUEUE)) {
		/* Normal queueing mode.
		 * Packet scheduler handles dropping. */
		if (skb_queue_len(&tun->readq) >= TUN_READQ_SIZE)
			netif_stop_queue(dev);
	} else {
		/* Single queue mode.
		 * Driver handles dropping itself. */
		if (skb_queue_len(&tun->readq) >= dev->tx_queue_len)
			goto drop;
	}
	skb_queue_tail(&tun->readq, skb);

	/* Notify and wake up reader process */
	if (tun->flags & TUN_FASYNC)
		kill_fasync(&tun->fasync, SIGIO, POLL_IN);
	wake_up_interruptible(&tun->read_wait);
	return 0;

drop:
	tun->stats.tx_dropped++;
	kfree_skb(skb);
	return 0;
}
static void tun_net_mclist(struct net_device *dev)
{
	/* Nothing to do for multicast filters.
	 * We always accept all frames. */
}
static struct net_device_stats *tun_net_stats(struct net_device *dev)
{
	struct tun_struct *tun = (struct tun_struct *)dev->priv;
	return &tun->stats;
}
/* Initialize net device. */
int tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = (struct tun_struct *)dev->priv;

	DBG(KERN_INFO "%s: tun_net_init\n", tun->name);

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Type PPP seems most suitable */
		dev->type = ARPHRD_PPP;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		dev->tx_queue_len = 10;
		break;

	case TUN_TAP_DEV:
		/* Ethernet TAP Device */
		dev->set_multicast_list = tun_net_mclist;

		/* Generate random Ethernet address. */
		*(u16 *)dev->dev_addr = htons(0x00FF);
		get_random_bytes(dev->dev_addr + sizeof(u16), 4);

		ether_setup(dev);
		break;
	}

	return 0;
}
/* Character device part */

/* Poll */
static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;
	unsigned int mask = POLLOUT | POLLWRNORM;

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_poll\n", tun->name);

	poll_wait(file, &tun->read_wait, wait);

	if (skb_queue_len(&tun->readq))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
/* Get packet from user space buffer (already verified) */
static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
				       struct iovec *iv, size_t count)
{
	struct tun_pi pi = { 0, __constant_htons(ETH_P_IP) };
	struct sk_buff *skb;
	size_t len = count;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) > len)
			return -EINVAL;

		memcpy_fromiovec((void *)&pi, iv, sizeof(pi));
	}

	if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) {
		tun->stats.rx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(skb, 2);
	memcpy_fromiovec(skb_put(skb, len), iv, len);

	skb->dev = tun->dev;
	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		skb->mac.raw = skb->data;
		skb->protocol = pi.proto;
		break;
	case TUN_TAP_DEV:
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	if (tun->flags & TUN_NOCHECKSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netif_rx_ni(skb);
	tun->dev->last_rx = jiffies;

	tun->stats.rx_packets++;
	tun->stats.rx_bytes += len;

	return count;
}
/* Writev */
static ssize_t tun_chr_writev(struct file *file, const struct iovec *iv,
			      unsigned long count, loff_t *pos)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;
	unsigned long i;
	size_t len;

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_write %lu\n", tun->name, count);

	for (i = 0, len = 0; i < count; i++) {
		if (verify_area(VERIFY_READ, iv[i].iov_base, iv[i].iov_len))
			return -EFAULT;
		len += iv[i].iov_len;
	}

	return tun_get_user(tun, (struct iovec *) iv, len);
}
/* Write */
static ssize_t tun_chr_write(struct file *file, const char *buf,
			     size_t count, loff_t *pos)
{
	struct iovec iv = { (void *) buf, count };
	return tun_chr_writev(file, &iv, 1, pos);
}
/* Put packet to the user space buffer (already verified) */
static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
				       struct sk_buff *skb,
				       struct iovec *iv, int len)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total = 0;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) < 0)
			return -EINVAL;

		if (len < skb->len) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		memcpy_toiovec(iv, (void *) &pi, sizeof(pi));
		total += sizeof(pi);
	}

	len = min_t(int, skb->len, len);

	skb_copy_datagram_iovec(skb, 0, iv, len);
	total += len;

	tun->stats.tx_packets++;
	tun->stats.tx_bytes += len;

	return total;
}
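/*
 * Note on framing (reflects tun_get_user()/tun_put_user() above): unless
 * TUN_NO_PI is set on the interface, every frame crossing the character
 * device is prefixed by a struct tun_pi carrying 16-bit flags and the
 * 16-bit protocol.  On reads, TUN_PKT_STRIP in pi.flags tells userspace
 * that its buffer was too small and the packet was truncated to fit.
 */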
/* Readv */
static ssize_t tun_chr_readv(struct file *file, const struct iovec *iv,
			     unsigned long count, loff_t *pos)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;
	ssize_t len, ret = 0;
	unsigned long i;

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_read\n", tun->name);

	for (i = 0, len = 0; i < count; i++) {
		if (verify_area(VERIFY_WRITE, iv[i].iov_base, iv[i].iov_len))
			return -EFAULT;
		len += iv[i].iov_len;
	}

	add_wait_queue(&tun->read_wait, &wait);
	while (len) {
		current->state = TASK_INTERRUPTIBLE;

		/* Read frames from the queue */
		if (!(skb = skb_dequeue(&tun->readq))) {
			if (file->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		netif_start_queue(tun->dev);

		ret = tun_put_user(tun, skb, (struct iovec *) iv, len);

		kfree_skb(skb);
		break;
	}

	current->state = TASK_RUNNING;
	remove_wait_queue(&tun->read_wait, &wait);

	return ret;
}
/* Read */
static ssize_t tun_chr_read(struct file *file, char *buf,
			    size_t count, loff_t *pos)
{
	struct iovec iv = { buf, count };
	return tun_chr_readv(file, &iv, 1, pos);
}
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = dev->priv;

	skb_queue_head_init(&tun->readq);
	init_waitqueue_head(&tun->read_wait);

	tun->owner = -1;

	dev->init = tun_net_init;
	tun->name = dev->name;
	SET_MODULE_OWNER(dev);
	dev->open = tun_net_open;
	dev->hard_start_xmit = tun_net_xmit;
	dev->stop = tun_net_close;
	dev->get_stats = tun_net_stats;
	dev->destructor = (void (*)(struct net_device *))kfree;
}
static int tun_set_iff(struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct net_device *dev;
	int err;

	dev = __dev_get_by_name(ifr->ifr_name);
	if (dev) {
		/* Device exists, attach to it */
		tun = dev->priv;

		if (dev->init != tun_net_init || tun->attached)
			return -EBUSY;

		/* Check permissions */
		if (tun->owner != -1)
			if (current->euid != tun->owner && !capable(CAP_NET_ADMIN))
				return -EPERM;
	} else {
		char *name;
		unsigned long flags = 0;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= TUN_TUN_DEV;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= TUN_TAP_DEV;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev(sizeof(struct tun_struct), name,
				   tun_setup);
		if (!dev)
			return -ENOMEM;

		tun = dev->priv;
		tun->dev = dev;
		tun->flags = flags;

		if (strchr(dev->name, '%')) {
			err = dev_alloc_name(dev, dev->name);
			if (err < 0)
				goto err_free_dev;
		}

		if ((err = register_netdevice(tun->dev))) {
			goto err_free_dev;
		}
	}

	DBG(KERN_INFO "%s: tun_set_iff\n", tun->name);

	if (ifr->ifr_flags & IFF_NO_PI)
		tun->flags |= TUN_NO_PI;

	if (ifr->ifr_flags & IFF_ONE_QUEUE)
		tun->flags |= TUN_ONE_QUEUE;

	file->private_data = tun;
	tun->attached = 1;

	strcpy(ifr->ifr_name, tun->name);
	return 0;

err_free_dev:
	kfree(dev);
	return err;
}
static int tun_chr_ioctl(struct inode *inode, struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;

	if (cmd == TUNSETIFF && !tun) {
		struct ifreq ifr;
		int err;

		if (copy_from_user(&ifr, (void *)arg, sizeof(ifr)))
			return -EFAULT;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		rtnl_lock();
		err = tun_set_iff(file, &ifr);
		rtnl_unlock();

		if (err)
			return err;

		copy_to_user((void *)arg, &ifr, sizeof(ifr));
		return 0;
	}

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->name, cmd);

	switch (cmd) {
	case TUNSETNOCSUM:
		/* Disable/Enable checksum */
		if (arg)
			tun->flags |= TUN_NOCHECKSUM;
		else
			tun->flags &= ~TUN_NOCHECKSUM;

		DBG(KERN_INFO "%s: checksum %s\n",
		    tun->name, arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode */
		if (arg)
			tun->flags |= TUN_PERSIST;
		else
			tun->flags &= ~TUN_PERSIST;

		DBG(KERN_INFO "%s: persist %s\n",
		    tun->name, arg ? "disabled" : "enabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		tun->owner = (uid_t) arg;

		DBG(KERN_INFO "%s: owner set to %d\n", tun->name, tun->owner);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
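/*
 * Illustrative userspace usage: a minimal sketch, not part of the driver.
 * It shows how a program would obtain a TUN interface through the TUNSETIFF
 * ioctl handled above and then exchange packets over the returned fd.  The
 * "/dev/net/tun" device node path and the error handling are assumptions
 * about the target system, not something this file defines.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *dev_name)
 *	{
 *		struct ifreq ifr;
 *		int fd;
 *
 *		fd = open("/dev/net/tun", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN;
 *		strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1);
 *
 *		if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *
 *		strcpy(dev_name, ifr.ifr_name);
 *		return fd;
 *	}
 *
 * Use IFF_TAP instead of IFF_TUN for raw Ethernet frames.  Each read()/write()
 * on the fd then carries one packet, prefixed by a struct tun_pi unless
 * IFF_NO_PI was also requested.
 */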
static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;
	int ret;

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->name, on);

	if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
		return ret;

	if (on) {
		ret = f_setown(file, current->pid, 0);
		if (ret)
			return ret;
		tun->flags |= TUN_FASYNC;
	} else
		tun->flags &= ~TUN_FASYNC;

	return 0;
}
static int tun_chr_open(struct inode *inode, struct file *file)
{
	DBG1(KERN_INFO "tunX: tun_chr_open\n");
	file->private_data = NULL;
	return 0;
}
static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_struct *tun = (struct tun_struct *)file->private_data;

	if (!tun)
		return 0;

	DBG(KERN_INFO "%s: tun_chr_close\n", tun->name);

	tun_chr_fasync(-1, file, 0);

	rtnl_lock();

	/* Detach from net device */
	file->private_data = NULL;
	tun->attached = 0;

	/* Drop read queue */
	skb_queue_purge(&tun->readq);

	if (!(tun->flags & TUN_PERSIST))
		unregister_netdevice(tun->dev);

	rtnl_unlock();
	return 0;
}
static struct file_operations tun_fops = {
	.owner	 = THIS_MODULE,
	.read	 = tun_chr_read,
	.readv	 = tun_chr_readv,
	.write	 = tun_chr_write,
	.writev	 = tun_chr_writev,
	.poll	 = tun_chr_poll,
	.ioctl	 = tun_chr_ioctl,
	.open	 = tun_chr_open,
	.release = tun_chr_close,
	.fasync	 = tun_chr_fasync
};
static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name  = "tun",
	.fops  = &tun_fops
};
int __init tun_init(void)
{
	printk(KERN_INFO "Universal TUN/TAP device driver %s "
	       "(C)1999-2002 Maxim Krasnyansky\n", TUN_VER);

	if (misc_register(&tun_miscdev)) {
		printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
		return -EIO;
	}

	return 0;
}
void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
}
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_LICENSE("GPL");