/* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 *
 * $FreeBSD: src/sys/net/if_tun.c,v 1.74.2.8 2002/02/13 00:43:11 dillon Exp $
 * $DragonFly: src/sys/net/tun/if_tun.c,v 1.35 2008/04/05 06:28:35 sephe Exp $
 */
#include "opt_atalk.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/thread2.h>
#include <sys/ttycom.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/netisr.h>
#include <net/route.h>

#include <netinet/in.h>

#include <net/bpf.h>

#include "if_tunvar.h"
static MALLOC_DEFINE(M_TUN, "tun", "Tunnel Interface");

static void tunattach (void *);
PSEUDO_SET(tunattach, if_tun);

static void tuncreate (cdev_t dev);

#define TUNDEBUG	if (tundebug) if_printf
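
/*
 * TUNDEBUG expands to a conditional if_printf(): callers pass (ifp, fmt, ...)
 * and the message is only emitted while the debug.if_tun_debug sysctl below
 * is non-zero.
 */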
static int tundebug = 0;
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");
static int tunoutput (struct ifnet *, struct mbuf *, struct sockaddr *,
		      struct rtentry *);
static int tunifioctl (struct ifnet *, u_long, caddr_t, struct ucred *);
static int tuninit (struct ifnet *);
static void tunstart(struct ifnet *);
static d_open_t		tunopen;
static d_close_t	tunclose;
static d_read_t		tunread;
static d_write_t	tunwrite;
static d_ioctl_t	tunioctl;
static d_poll_t		tunpoll;
static struct dev_ops tun_ops = {
        { "tun", CDEV_MAJOR, 0 },
tunattach(void *dummy)

        dev_ops_add(&tun_ops, 0, 0);
tuncreate(cdev_t dev)

        struct tun_softc *sc;

        dev = make_dev(&tun_ops, minor(dev),
                       UID_UUCP, GID_DIALER, 0600, "tun%d", lminor(dev));

        MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
        sc->tun_flags = TUN_INITED;

        if_initname(ifp, "tun", lminor(dev));
        ifp->if_mtu = TUNMTU;
        ifp->if_ioctl = tunifioctl;
        ifp->if_output = tunoutput;
        ifp->if_start = tunstart;
        ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
        ifp->if_type = IFT_PPP;
        ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
        ifq_set_ready(&ifp->if_snd);

        if_attach(ifp, NULL);
        bpfattach(ifp, DLT_NULL, sizeof(u_int));
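
        /*
         * Note: bpfattach() with DLT_NULL means BPF consumers on tunN see
         * each packet preceded by a 4-byte address family in host byte
         * order, which is why the output and write paths below go to some
         * trouble to supply one.
         */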
/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
tunopen(struct dev_open_args *ap)

        cdev_t dev = ap->a_head.a_dev;

        struct tun_softc *tp;

        if ((error = suser_cred(ap->a_cred, 0)) != NULL)

        if (tp->tun_flags & TUN_OPEN)

        tp->tun_pid = curproc->p_pid;

        tp->tun_flags |= TUN_OPEN;
        TUNDEBUG(ifp, "open\n");
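
        /*
         * Typical userland usage (illustrative sketch only, not part of
         * this driver):
         *
         *	int fd = open("/dev/tun0", O_RDWR);	(superuser only, see above)
         *	system("ifconfig tun0 10.0.0.1 10.0.0.2 up");
         *
         * after which read(2) and write(2) on fd carry the tunnelled packets.
         */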
/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
tunclose(struct dev_close_args *ap)

        cdev_t dev = ap->a_head.a_dev;
        struct tun_softc *tp;

        tp->tun_flags &= ~TUN_OPEN;

        /* Junk all pending output. */
        lwkt_serialize_enter(ifp->if_serializer);
        ifq_purge(&ifp->if_snd);
        lwkt_serialize_exit(ifp->if_serializer);

        if (ifp->if_flags & IFF_UP) {
                lwkt_serialize_enter(ifp->if_serializer);

                lwkt_serialize_exit(ifp->if_serializer);

        ifp->if_flags &= ~IFF_RUNNING;
        if_purgeaddrs_nolink(ifp);

        funsetown(tp->tun_sigio);
        selwakeup(&tp->tun_rsel);

        TUNDEBUG(ifp, "closed\n");
tuninit(struct ifnet *ifp)

        struct tun_softc *tp = ifp->if_softc;
        struct ifaddr_container *ifac;

        TUNDEBUG(ifp, "tuninit\n");

        ifp->if_flags |= IFF_UP | IFF_RUNNING;
        getmicrotime(&ifp->if_lastchange);

        TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
                struct ifaddr *ifa = ifac->ifa;

                if (ifa->ifa_addr == NULL) {

                        /* XXX: Should maybe return straight off? */

                if (ifa->ifa_addr->sa_family == AF_INET) {
                        struct sockaddr_in *si;

                        si = (struct sockaddr_in *)ifa->ifa_addr;
                        if (si->sin_addr.s_addr)
                                tp->tun_flags |= TUN_IASET;
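
        /*
         * TUN_IASET records that an IPv4 address has been assigned to the
         * interface; tuninit() is run again from tunifioctl() whenever the
         * interface or destination address is (re)configured.
         */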
/*
 * Process an ioctl request.
 */
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)

        struct ifreq *ifr = (struct ifreq *)data;
        struct tun_softc *tp = ifp->if_softc;

        ifs = (struct ifstat *)data;

        ksprintf(ifs->ascii + strlen(ifs->ascii),
                 "\tOpened by PID %d\n", tp->tun_pid);

        error = tuninit(ifp);
        TUNDEBUG(ifp, "address set, error=%d\n", error);

        error = tuninit(ifp);
        TUNDEBUG(ifp, "destination address set, error=%d\n", error);

        ifp->if_mtu = ifr->ifr_mtu;
        TUNDEBUG(ifp, "mtu set\n");
/*
 * tunoutput - queue packets from higher level ready to put out.
 */
tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
	  struct rtentry *rt)

        struct tun_softc *tp = ifp->if_softc;

        struct altq_pktattr pktattr;

        TUNDEBUG(ifp, "tunoutput\n");

        if ((tp->tun_flags & TUN_READY) != TUN_READY) {
                TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);

        /*
         * if the queueing discipline needs packet classification,
         * do it before prepending link headers.
         */
        ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr);

        /* BPF write needs to be handled specially */
        if (dst->sa_family == AF_UNSPEC) {
                dst->sa_family = *(mtod(m0, int *));
                m0->m_len -= sizeof(int);
                m0->m_pkthdr.len -= sizeof(int);
                m0->m_data += sizeof(int);
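
                /*
                 * A write(2) on an attached BPF descriptor reaches us with
                 * dst->sa_family set to AF_UNSPEC and the real address
                 * family stored in the first int of the payload (read here
                 * as a host-order int); it is extracted and stripped before
                 * the packet is queued.
                 */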
        /*
         * We need to prepend the address family as
         * a four byte field.
         */
        uint32_t af = dst->sa_family;

        bpf_ptap(ifp->if_bpf, m0, &af, sizeof(af));

        /* prepend sockaddr? this may abort if the mbuf allocation fails */
        if (tp->tun_flags & TUN_LMODE) {
                /* allocate space for sockaddr */
                M_PREPEND(m0, dst->sa_len, MB_DONTWAIT);

                /* if allocation failed drop packet */

                IF_DROP(&ifp->if_snd);

                bcopy(dst, m0->m_data, dst->sa_len);
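
        /*
         * In link-layer mode (TUNSLMODE) the destination sockaddr itself is
         * prepended, so a reader of the device can tell where each packet
         * was headed.
         */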
        if (tp->tun_flags & TUN_IFHEAD) {
                /* Prepend the address family */
                M_PREPEND(m0, 4, MB_DONTWAIT);

                /* if allocation failed drop packet */

                IF_DROP(&ifp->if_snd);

                *(u_int32_t *)m0->m_data = htonl(dst->sa_family);
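
                /*
                 * In multi-AF mode (TUNSIFHEAD) every packet read from the
                 * device is thus preceded by a 4-byte address family in
                 * network byte order; tunwrite() expects the same framing
                 * on input.
                 */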
        if (dst->sa_family != AF_INET)

        error = ifq_handoff(ifp, m0, &pktattr);

        ifp->if_collisions++;

        if (tp->tun_flags & TUN_RWAIT) {
                tp->tun_flags &= ~TUN_RWAIT;

        if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
                pgsigio(tp->tun_sigio, SIGIO, 0);
        selwakeup(&tp->tun_rsel);
/*
 * the ops interface is now pretty minimal.
 */
tunioctl(struct dev_ioctl_args *ap)

        cdev_t dev = ap->a_head.a_dev;
        struct tun_softc *tp = dev->si_drv1;
        struct tuninfo *tunp;

        tunp = (struct tuninfo *)ap->a_data;
        if (tunp->mtu < IF_MINMTU)

        tp->tun_if.if_mtu = tunp->mtu;
        tp->tun_if.if_type = tunp->type;
        tp->tun_if.if_baudrate = tunp->baudrate;

        tunp = (struct tuninfo *)ap->a_data;
        tunp->mtu = tp->tun_if.if_mtu;
        tunp->type = tp->tun_if.if_type;
        tunp->baudrate = tp->tun_if.if_baudrate;

        tundebug = *(int *)ap->a_data;

        *(int *)ap->a_data = tundebug;

        if (*(int *)ap->a_data) {
                tp->tun_flags |= TUN_LMODE;
                tp->tun_flags &= ~TUN_IFHEAD;

                tp->tun_flags &= ~TUN_LMODE;

        if (*(int *)ap->a_data) {
                tp->tun_flags |= TUN_IFHEAD;
                tp->tun_flags &= ~TUN_LMODE;

                tp->tun_flags &= ~TUN_IFHEAD;

        *(int *)ap->a_data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;

        /* deny this if UP */
        if (tp->tun_if.if_flags & IFF_UP)

        switch (*(int *)ap->a_data & ~IFF_MULTICAST) {
        case IFF_POINTOPOINT:

                tp->tun_if.if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT);
                tp->tun_if.if_flags |= *(int *)ap->a_data;
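
                /*
                 * TUNSIFMODE switches the interface between point-to-point
                 * and broadcast style (plus IFF_MULTICAST), and is only
                 * honoured while the interface is down; see the IFF_UP
                 * check above.
                 */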
        tp->tun_pid = curproc->p_pid;

        if (*(int *)ap->a_data)
                tp->tun_flags |= TUN_ASYNC;

                tp->tun_flags &= ~TUN_ASYNC;

        lwkt_serialize_enter(tp->tun_if.if_serializer);
        if (!ifq_is_empty(&tp->tun_if.if_snd)) {

                mb = ifq_poll(&tp->tun_if.if_snd);
                for (*(int *)ap->a_data = 0; mb != 0; mb = mb->m_next)
                        *(int *)ap->a_data += mb->m_len;

                *(int *)ap->a_data = 0;

        lwkt_serialize_exit(tp->tun_if.if_serializer);
        return (fsetown(*(int *)ap->a_data, &tp->tun_sigio));

        *(int *)ap->a_data = fgetown(tp->tun_sigio);

        /* This is deprecated, FIOSETOWN should be used instead. */
        return (fsetown(-(*(int *)ap->a_data), &tp->tun_sigio));

        /* This is deprecated, FIOGETOWN should be used instead. */
        *(int *)ap->a_data = -fgetown(tp->tun_sigio);
/*
 * The ops read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
tunread(struct dev_read_args *ap)

        cdev_t dev = ap->a_head.a_dev;
        struct uio *uio = ap->a_uio;
        struct tun_softc *tp = dev->si_drv1;
        struct ifnet *ifp = &tp->tun_if;

        TUNDEBUG(ifp, "read\n");
        if ((tp->tun_flags & TUN_READY) != TUN_READY) {
                TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);

        tp->tun_flags &= ~TUN_RWAIT;

        lwkt_serialize_enter(ifp->if_serializer);

        while ((m0 = ifq_dequeue(&ifp->if_snd, NULL)) == NULL) {
                if (ap->a_ioflag & IO_NDELAY) {
                        lwkt_serialize_exit(ifp->if_serializer);

                tp->tun_flags |= TUN_RWAIT;
                lwkt_serialize_exit(ifp->if_serializer);
                if ((error = tsleep(tp, PCATCH, "tunread", 0)) != 0)

                lwkt_serialize_enter(ifp->if_serializer);

        lwkt_serialize_exit(ifp->if_serializer);

        while (m0 && uio->uio_resid > 0 && error == 0) {
                len = min(uio->uio_resid, m0->m_len);

                error = uiomove(mtod(m0, caddr_t), len, uio);

        TUNDEBUG(ifp, "Dropping mbuf\n");
/*
 * the ops write interface - an atomic write is a packet - or else!
 */
tunwrite(struct dev_write_args *ap)

        cdev_t dev = ap->a_head.a_dev;
        struct uio *uio = ap->a_uio;
        struct tun_softc *tp = dev->si_drv1;
        struct ifnet *ifp = &tp->tun_if;
        struct mbuf *top, **mp, *m;
        int error = 0, tlen, mlen;

        TUNDEBUG(ifp, "tunwrite\n");

        if (uio->uio_resid == 0)

        if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
                TUNDEBUG(ifp, "len=%d!\n", uio->uio_resid);

        tlen = uio->uio_resid;

        /* get a header mbuf */
        MGETHDR(m, MB_DONTWAIT, MT_DATA);

        while (error == 0 && uio->uio_resid > 0) {
                m->m_len = min(mlen, uio->uio_resid);
                error = uiomove(mtod(m, caddr_t), m->m_len, uio);

                if (uio->uio_resid > 0) {
                        MGET(m, MB_DONTWAIT, MT_DATA);

        top->m_pkthdr.len = tlen;
        top->m_pkthdr.rcvif = ifp;

        if (tp->tun_flags & TUN_IFHEAD) {
                /*
                 * Conveniently, we already have a 4-byte address
                 * family prepended to our packet !
                 * Inconveniently, it's in the wrong byte order !
                 */
                if ((top = m_pullup(top, sizeof(family))) == NULL)

                *mtod(top, u_int32_t *) =
                    ntohl(*mtod(top, u_int32_t *));
                bpf_mtap(ifp->if_bpf, top);
                *mtod(top, u_int32_t *) =
                    htonl(*mtod(top, u_int32_t *));
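
                /*
                 * bpf_mtap() on a DLT_NULL interface expects the leading
                 * address family in host byte order, so the header is
                 * byte-swapped around the tap and then restored for the
                 * input processing below.
                 */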
                /*
                 * We need to prepend the address family as
                 * a four byte field.
                 */
                static const uint32_t af = AF_INET;

                bpf_ptap(ifp->if_bpf, top, &af, sizeof(af));

        if (tp->tun_flags & TUN_IFHEAD) {
                if (top->m_len < sizeof(family) &&
                    (top = m_pullup(top, sizeof(family))) == NULL)

                family = ntohl(*mtod(top, u_int32_t *));
                m_adj(top, sizeof(family));
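
                /*
                 * The 4-byte address family header has now been consumed;
                 * its value selects the netisr the packet is dispatched to
                 * below.
                 */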
        ifp->if_ibytes += top->m_pkthdr.len;

        return (EAFNOSUPPORT);

        netisr_dispatch(isr, top);
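
        /*
         * netisr_dispatch() hands the completed packet to the protocol
         * input queue selected by the address family (e.g. NETISR_IP for
         * AF_INET); families without a netisr are rejected with
         * EAFNOSUPPORT above.
         */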
/*
 * tunpoll - the poll interface, this is only useful on reads
 * really. The write detect always returns true; write never blocks
 * anyway, it either accepts the packet or drops it.
 */
tunpoll(struct dev_poll_args *ap)

        cdev_t dev = ap->a_head.a_dev;
        struct tun_softc *tp = dev->si_drv1;
        struct ifnet *ifp = &tp->tun_if;

        TUNDEBUG(ifp, "tunpoll\n");

        lwkt_serialize_enter(ifp->if_serializer);

        if (ap->a_events & (POLLIN | POLLRDNORM)) {
                if (!ifq_is_empty(&ifp->if_snd)) {
                        TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
                        revents |= ap->a_events & (POLLIN | POLLRDNORM);

                TUNDEBUG(ifp, "tunpoll waiting\n");
                selrecord(curthread, &tp->tun_rsel);

        if (ap->a_events & (POLLOUT | POLLWRNORM))
                revents |= ap->a_events & (POLLOUT | POLLWRNORM);

        lwkt_serialize_exit(ifp->if_serializer);
        ap->a_events = revents;
/*
 * Start packet transmission on the interface.
 * When the interface queue is rate-limited by ALTQ,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 */
tunstart(struct ifnet *ifp)

        struct tun_softc *tp = ifp->if_softc;

        if (!ifq_is_enabled(&ifp->if_snd))

        m = ifq_poll(&ifp->if_snd);

        if (tp->tun_flags & TUN_RWAIT) {
                tp->tun_flags &= ~TUN_RWAIT;

        if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
                pgsigio(tp->tun_sigio, SIGIO, 0);
        selwakeup(&tp->tun_rsel);