[netsniff-ng.git] / src / ct_server.c
/*
 * curvetun - the cipherspace wormhole creator
 * Part of the netsniff-ng project
 * By Daniel Borkmann <daniel@netsniff-ng.org>
 * Copyright 2011 Daniel Borkmann <daniel@netsniff-ng.org>,
 * Subject to the GPL, version 2.
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <syslog.h>
#include <signal.h>
#include <netdb.h>
#include <stdint.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/epoll.h>
#include <arpa/inet.h>
#include <linux/if_tun.h>

#include "die.h"
#include "xsys.h"
#include "xio.h"
#include "xmalloc.h"
#include "curvetun.h"
#include "curve.h"
#include "built_in.h"
#include "usermgmt.h"
#include "cpusched.h"
#include "trie.h"
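
/*
 * Each worker thread carries a copy of the parent's descriptors (event and
 * retrigger pipes, tun device) plus its own event pipe, CPU binding, packet
 * handler and curve25519 crypto state.
 */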
struct parent_info {
	int efd;
	int refd;
	int tunfd;
	int ipv4;
	int udp;
};

struct worker_struct {
	pthread_t trid;
	int efd[2];
	unsigned int cpu;
	struct parent_info parent;
	int (*handler)(int fd, const struct worker_struct *ws,
		       char *buff, size_t len);
	struct curve25519_struct *c;
};

static struct worker_struct *threadpool = NULL;

static int auth_log = 1;

extern volatile sig_atomic_t sigint;

static int handler_udp_tun_to_net(int fd, const struct worker_struct *ws,
				  char *buff, size_t len) __pure;
static int handler_udp_net_to_tun(int fd, const struct worker_struct *ws,
				  char *buff, size_t len) __pure;
static int handler_udp(int fd, const struct worker_struct *ws,
		       char *buff, size_t len) __pure;
static int handler_tcp_tun_to_net(int fd, const struct worker_struct *ws,
				  char *buff, size_t len) __pure;
static int handler_tcp_net_to_tun(int fd, const struct worker_struct *ws,
				  char *buff, size_t len) __pure;
static int handler_tcp(int fd, const struct worker_struct *ws,
		       char *buff, size_t len) __pure;
ssize_t handler_tcp_read(int fd, char *buff, size_t len);
static void *worker(void *self) __pure;
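
/*
 * tun -> net (UDP): read cleartext packets from the tun device, look up the
 * destination peer in the address trie, encrypt the payload with the peer's
 * curve25519 session and send a ct_proto header plus ciphertext out over the
 * UDP socket.
 */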
static int handler_udp_tun_to_net(int fd, const struct worker_struct *ws,
				  char *buff, size_t len)
{
	int dfd, keep = 1;
	char *cbuff;
	ssize_t rlen, err, clen;
	struct ct_proto *hdr;
	struct curve25519_proto *p;
	struct sockaddr_storage naddr;
	socklen_t nlen;
	size_t off = sizeof(struct ct_proto) + crypto_box_zerobytes;

	if (!buff || len <= off) {
		errno = EINVAL;
		return 0;
	}

	errno = 0;

	memset(buff, 0, len);
	while ((rlen = read(fd, buff + off, len - off)) > 0) {
		dfd = -1;
		nlen = 0;
		p = NULL;

		memset(&naddr, 0, sizeof(naddr));

		hdr = (struct ct_proto *) buff;
		memset(hdr, 0, sizeof(*hdr));
		hdr->flags = 0;

		trie_addr_lookup(buff + off, rlen, ws->parent.ipv4, &dfd, &naddr,
				 (size_t *) &nlen);
		if (unlikely(dfd < 0 || nlen == 0)) {
			syslog(LOG_INFO, "CPU%u: UDP tunnel lookup failed: "
			       "unknown destination\n", ws->cpu);
			memset(buff, 0, len);
			continue;
		}

		err = get_user_by_sockaddr(&naddr, nlen, &p);
		if (unlikely(err || !p)) {
			syslog(LOG_ERR, "CPU%u: User protocol not in cache! "
			       "Dropping connection!\n", ws->cpu);
			memset(buff, 0, len);
			continue;
		}

		clen = curve25519_encode(ws->c, p, (unsigned char *) (buff + off -
					 crypto_box_zerobytes), (rlen +
					 crypto_box_zerobytes), (unsigned char **)
					 &cbuff);
		if (unlikely(clen <= 0)) {
			syslog(LOG_ERR, "CPU%u: UDP tunnel encrypt error: %zd\n",
			       ws->cpu, clen);
			memset(buff, 0, len);
			continue;
		}

		hdr->payload = htons((uint16_t) clen);

		set_udp_cork(dfd);

		err = sendto(dfd, hdr, sizeof(struct ct_proto), 0,
			     (struct sockaddr *) &naddr, nlen);
		if (unlikely(err < 0))
			syslog(LOG_ERR, "CPU%u: UDP tunnel write error: %s\n",
			       ws->cpu, strerror(errno));

		err = sendto(dfd, cbuff, clen, 0, (struct sockaddr *) &naddr,
			     nlen);
		if (unlikely(err < 0))
			syslog(LOG_ERR, "CPU%u: UDP tunnel write error: %s\n",
			       ws->cpu, strerror(errno));

		set_udp_uncork(dfd);

		errno = 0;
		memset(buff, 0, len);
	}

	if (unlikely(rlen < 0 && errno != EAGAIN))
		syslog(LOG_ERR, "CPU%u: UDP tunnel read error: %s\n",
		       ws->cpu, strerror(errno));

	return keep;
}
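
/* Tell the remote end that its tunnel is going away (PROTO_FLAG_EXIT). */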
static void handler_udp_notify_close(int fd, struct sockaddr_storage *addr)
{
	ssize_t err;
	struct ct_proto hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.flags |= PROTO_FLAG_EXIT;
	hdr.payload = 0;

	err = sendto(fd, &hdr, sizeof(hdr), 0, (struct sockaddr *) addr, sizeof(*addr));
	if (err < 0)
		syslog(LOG_ERR, "Error while sending close notification: "
		       "%s!\n", strerror(errno));
}
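
/*
 * net -> tun (UDP): receive framed datagrams from remote peers, validate the
 * ct_proto header, register new users on PROTO_FLAG_INIT, decrypt the
 * payload, update the address trie and write the cleartext packet into the
 * tun device. On malformed input or PROTO_FLAG_EXIT the peer's state is
 * dropped and a close notification is sent back.
 */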
static int handler_udp_net_to_tun(int fd, const struct worker_struct *ws,
				  char *buff, size_t len)
{
	int keep = 1;
	char *cbuff;
	ssize_t rlen, err, clen;
	struct ct_proto *hdr;
	struct curve25519_proto *p;
	struct sockaddr_storage naddr;
	socklen_t nlen = sizeof(naddr);

	if (!buff || !len) {
		errno = EINVAL;
		return 0;
	}

	memset(&naddr, 0, sizeof(naddr));
	errno = 0;

	while ((rlen = recvfrom(fd, buff, len, 0, (struct sockaddr *) &naddr,
				&nlen)) > 0) {
		p = NULL;
		hdr = (struct ct_proto *) buff;
		if (unlikely(rlen < sizeof(struct ct_proto)))
			goto close;
		if (unlikely(rlen - sizeof(*hdr) != ntohs(hdr->payload)))
			goto close;
		if (unlikely(ntohs(hdr->payload) == 0))
			goto close;
		if (hdr->flags & PROTO_FLAG_EXIT) {
close:
			remove_user_by_sockaddr(&naddr, nlen);
			trie_addr_remove_addr(&naddr, nlen);
			handler_udp_notify_close(fd, &naddr);

			return keep;
		}
		if (hdr->flags & PROTO_FLAG_INIT) {
			if (auth_log)
				syslog(LOG_INFO, "Got initial userhash from remote end!\n");

			if (unlikely(rlen - sizeof(*hdr) <
				     sizeof(struct username_struct)))
				goto close;

			err = try_register_user_by_sockaddr(ws->c, buff + sizeof(struct ct_proto),
							    rlen - sizeof(struct ct_proto),
							    &naddr, nlen, auth_log);
			if (unlikely(err))
				goto close;

			goto next;
		}

		err = get_user_by_sockaddr(&naddr, nlen, &p);
		if (unlikely(err || !p)) {
			syslog(LOG_ERR, "CPU%u: User protocol not in cache! "
			       "Dropping connection!\n", ws->cpu);
			goto close;
		}

		clen = curve25519_decode(ws->c, p, (unsigned char *) buff +
					 sizeof(struct ct_proto),
					 rlen - sizeof(struct ct_proto),
					 (unsigned char **) &cbuff, NULL);
		if (unlikely(clen <= 0)) {
			syslog(LOG_ERR, "CPU%u: UDP net decryption error: %zd\n",
			       ws->cpu, clen);
			goto close;
		}

		cbuff += crypto_box_zerobytes;
		clen -= crypto_box_zerobytes;

		err = trie_addr_maybe_update(cbuff, clen, ws->parent.ipv4,
					     fd, &naddr, nlen);
		if (unlikely(err)) {
			syslog(LOG_INFO, "CPU%u: Malicious packet dropped "
			       "from id %d\n", ws->cpu, fd);
			goto next;
		}

		err = write(ws->parent.tunfd, cbuff, clen);
		if (unlikely(err < 0))
			syslog(LOG_ERR, "CPU%u: UDP net write error: %s\n",
			       ws->cpu, strerror(errno));
next:
		nlen = sizeof(naddr);

		memset(&naddr, 0, sizeof(naddr));
		errno = 0;
	}

	if (unlikely(rlen < 0 && errno != EAGAIN))
		syslog(LOG_ERR, "CPU%u: UDP net read error: %s\n",
		       ws->cpu, strerror(errno));

	return keep;
}
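
/*
 * Dispatch: traffic coming from the tun descriptor goes out to the net,
 * anything else is the UDP socket whose data goes into the tun device.
 */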
static int handler_udp(int fd, const struct worker_struct *ws,
		       char *buff, size_t len)
{
	int ret = 0;

	if (fd == ws->parent.tunfd)
		ret = handler_udp_tun_to_net(fd, ws, buff, len);
	else
		ret = handler_udp_net_to_tun(fd, ws, buff, len);

	return ret;
}
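
/*
 * tun -> net (TCP): same flow as the UDP path, but the destination is a
 * connected TCP socket, so the trie lookup only yields the descriptor and
 * the header/ciphertext pair is written with write_exact() under TCP cork.
 */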
static int handler_tcp_tun_to_net(int fd, const struct worker_struct *ws,
				  char *buff, size_t len)
{
	int dfd, keep = 1;
	char *cbuff;
	ssize_t rlen, err, clen;
	struct ct_proto *hdr;
	struct curve25519_proto *p;
	socklen_t nlen;
	size_t off = sizeof(struct ct_proto) + crypto_box_zerobytes;

	if (!buff || len <= off) {
		errno = EINVAL;
		return 0;
	}

	errno = 0;
	memset(buff, 0, len);

	while ((rlen = read(fd, buff + off, len - off)) > 0) {
		dfd = -1;
		p = NULL;

		hdr = (struct ct_proto *) buff;
		memset(hdr, 0, sizeof(*hdr));
		hdr->flags = 0;

		trie_addr_lookup(buff + off, rlen, ws->parent.ipv4, &dfd, NULL,
				 (size_t *) &nlen);
		if (unlikely(dfd < 0)) {
			syslog(LOG_INFO, "CPU%u: TCP tunnel lookup failed: "
			       "unknown destination\n", ws->cpu);
			memset(buff, 0, len);
			continue;
		}

		err = get_user_by_socket(dfd, &p);
		if (unlikely(err || !p)) {
			syslog(LOG_ERR, "CPU%u: User protocol not in cache! "
			       "Dropping connection!\n", ws->cpu);
			memset(buff, 0, len);
			continue;
		}

		clen = curve25519_encode(ws->c, p, (unsigned char *) (buff + off -
					 crypto_box_zerobytes), (rlen +
					 crypto_box_zerobytes), (unsigned char **)
					 &cbuff);
		if (unlikely(clen <= 0)) {
			syslog(LOG_ERR, "CPU%u: TCP tunnel encrypt error: %zd\n",
			       ws->cpu, clen);
			memset(buff, 0, len);
			continue;
		}

		hdr->payload = htons((uint16_t) clen);

		set_tcp_cork(dfd);

		err = write_exact(dfd, hdr, sizeof(struct ct_proto), 0);
		if (unlikely(err < 0))
			syslog(LOG_ERR, "CPU%u: TCP tunnel write error: %s\n",
			       ws->cpu, strerror(errno));

		err = write_exact(dfd, cbuff, clen, 0);
		if (unlikely(err < 0))
			syslog(LOG_ERR, "CPU%u: TCP tunnel write error: %s\n",
			       ws->cpu, strerror(errno));

		set_tcp_uncork(dfd);

		errno = 0;
		memset(buff, 0, len);
	}

	if (unlikely(rlen < 0 && errno != EAGAIN))
		syslog(LOG_ERR, "CPU%u: TCP tunnel read error: %s\n",
		       ws->cpu, strerror(errno));

	return keep;
}
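
/*
 * Read one framed message from a TCP socket: first the fixed-size ct_proto
 * header, then exactly hdr->payload bytes of ciphertext. Returns the total
 * number of bytes read, a negative value on read errors, or 1 when the
 * advertised payload would not fit into the buffer (forces a close).
 */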
ssize_t handler_tcp_read(int fd, char *buff, size_t len)
{
	ssize_t rlen;
	struct ct_proto *hdr = (struct ct_proto *) buff;

	if (!buff || !len) {
		errno = EINVAL;
		return 0;
	}

	/* May exit on EAGAIN if 0 Byte read */
	rlen = read_exact(fd, buff, sizeof(struct ct_proto), 1);
	if (rlen < 0)
		return rlen;
	if (unlikely(ntohs(hdr->payload) > len - sizeof(struct ct_proto))) {
		errno = ENOMEM;
		return 1; /* Force server to close connection */
	}

	/* May not exit on EAGAIN if 0 Byte read */
	rlen = read_exact(fd, buff + sizeof(struct ct_proto),
			  ntohs(hdr->payload), 0);
	if (rlen < 0)
		return rlen;

	return sizeof(struct ct_proto) + rlen;
}
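
/* Tell the remote end that this connection is about to be closed. */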
static void handler_tcp_notify_close(int fd)
{
	ssize_t err;
	struct ct_proto hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.flags |= PROTO_FLAG_EXIT;
	hdr.payload = 0;

	err = write(fd, &hdr, sizeof(hdr));
	if (err < 0)
		syslog(LOG_ERR, "Error while sending close notification: "
		       "%s!\n", strerror(errno));
}
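
/*
 * net -> tun (TCP): read framed messages from the client socket, handle
 * INIT/EXIT control flags, decrypt and forward payloads to the tun device.
 * After 10 messages in a row the descriptor is pushed back into the worker's
 * own event pipe so a single busy connection does not monopolize the thread;
 * on close the descriptor is handed back to the parent via parent.efd.
 */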
static int handler_tcp_net_to_tun(int fd, const struct worker_struct *ws,
				  char *buff, size_t len)
{
	int keep = 1, count = 0;
	char *cbuff;
	ssize_t rlen, err, clen;
	struct ct_proto *hdr;
	struct curve25519_proto *p;

	if (!buff || !len) {
		errno = EINVAL;
		return 0;
	}

	errno = 0;

	while ((rlen = handler_tcp_read(fd, buff, len)) > 0) {
		p = NULL;
		hdr = (struct ct_proto *) buff;
		if (unlikely(rlen < sizeof(struct ct_proto)))
			goto close;
		if (unlikely(rlen - sizeof(*hdr) != ntohs(hdr->payload)))
			goto close;
		if (unlikely(ntohs(hdr->payload) == 0))
			goto close;
		if (hdr->flags & PROTO_FLAG_EXIT) {
close:
			remove_user_by_socket(fd);
			trie_addr_remove(fd);
			handler_tcp_notify_close(fd);

			rlen = write(ws->parent.efd, &fd, sizeof(fd));
			if (rlen != sizeof(fd))
				syslog(LOG_ERR, "CPU%u: TCP event write error: %s\n",
				       ws->cpu, strerror(errno));

			keep = 0;
			return keep;
		}
		if (hdr->flags & PROTO_FLAG_INIT) {
			if (auth_log)
				syslog(LOG_INFO, "Got initial userhash from remote end!\n");
			if (unlikely(rlen - sizeof(*hdr) <
				     sizeof(struct username_struct)))
				goto close;

			err = try_register_user_by_socket(ws->c, buff + sizeof(struct ct_proto),
							  rlen - sizeof(struct ct_proto),
							  fd, auth_log);
			if (unlikely(err))
				goto close;

			continue;
		}

		err = get_user_by_socket(fd, &p);
		if (unlikely(err || !p)) {
			syslog(LOG_ERR, "CPU%u: User protocol not in cache! "
			       "Dropping connection!\n", ws->cpu);
			goto close;
		}

		clen = curve25519_decode(ws->c, p, (unsigned char *) buff +
					 sizeof(struct ct_proto),
					 rlen - sizeof(struct ct_proto),
					 (unsigned char **) &cbuff, NULL);
		if (unlikely(clen <= 0)) {
			syslog(LOG_ERR, "CPU%u: TCP net decryption error: %zd\n",
			       ws->cpu, clen);
			goto close;
		}

		cbuff += crypto_box_zerobytes;
		clen -= crypto_box_zerobytes;

		err = trie_addr_maybe_update(cbuff, clen, ws->parent.ipv4,
					     fd, NULL, 0);
		if (unlikely(err)) {
			syslog(LOG_INFO, "CPU%u: Malicious packet dropped "
			       "from id %d\n", ws->cpu, fd);
			continue;
		}

		err = write(ws->parent.tunfd, cbuff, clen);
		if (unlikely(err < 0))
			syslog(LOG_ERR, "CPU%u: TCP net write error: %s\n",
			       ws->cpu, strerror(errno));

		count++;
		if (count == 10) {
			err = write_exact(ws->efd[1], &fd, sizeof(fd), 1);
			if (unlikely(err != sizeof(fd)))
				syslog(LOG_ERR, "CPU%u: TCP net put fd back in "
				       "pipe error: %s\n", ws->cpu, strerror(errno));
			return keep;
		}

		errno = 0;
	}

	if (unlikely(rlen < 0 && errno != EAGAIN && errno != EBADF))
		syslog(LOG_ERR, "CPU%u: TCP net read error: %s\n",
		       ws->cpu, strerror(errno));

	return keep;
}

static int handler_tcp(int fd, const struct worker_struct *ws,
		       char *buff, size_t len)
{
	int ret = 0;

	if (fd == ws->parent.tunfd)
		ret = handler_tcp_tun_to_net(fd, ws, buff, len);
	else
		ret = handler_tcp_net_to_tun(fd, ws, buff, len);

	return ret;
}
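
/*
 * Per-thread worker loop: block in poll() on the thread's event pipe, pull
 * ready file descriptors out of it, run the configured packet handler on
 * each, and, if the handler wants the descriptor re-armed, push it back to
 * the parent through the retrigger pipe.
 */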
static void *worker(void *self)
{
	int fd, old_state;
	ssize_t ret;
	size_t blen = TUNBUFF_SIZ; //FIXME
	const struct worker_struct *ws = self;
	struct pollfd fds;
	char *buff;

	fds.fd = ws->efd[0];
	fds.events = POLLIN;

	ret = curve25519_alloc_or_maybe_die(ws->c);
	if (ret < 0)
		syslog_panic("Cannot init curve25519!\n");

	buff = xmalloc_aligned(blen, 64);

	syslog(LOG_INFO, "curvetun thread on CPU%u up!\n", ws->cpu);

	pthread_cleanup_push(xfree_func, ws->c);
	pthread_cleanup_push(curve25519_free, ws->c);
	pthread_cleanup_push(xfree_func, buff);

	while (likely(!sigint)) {
		poll(&fds, 1, -1);
		if ((fds.revents & POLLIN) != POLLIN)
			continue;

		pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_state);

		while ((ret = read_exact(ws->efd[0], &fd, sizeof(fd), 1)) > 0) {
			if (ret != sizeof(fd)) {
				syslog(LOG_ERR, "CPU%u: Thread could not read "
				       "event descriptor!\n", ws->cpu);
				sched_yield();

				continue;
			}

			ret = ws->handler(fd, ws, buff, blen);
			if (ret) {
				ret = write_exact(ws->parent.refd, &fd, sizeof(fd), 1);
				if (ret != sizeof(fd))
					syslog(LOG_ERR, "CPU%u: Retriggering failed: "
					       "%s\n", ws->cpu, strerror(errno));
			}
		}

		pthread_setcancelstate(old_state, NULL);
	}

	syslog(LOG_INFO, "curvetun thread on CPU%u down!\n", ws->cpu);

	pthread_cleanup_pop(1);
	pthread_cleanup_pop(1);
	pthread_cleanup_pop(1);

	pthread_exit((void *) ((long) ws->cpu));
}
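
/*
 * Spin up cpus * THREADS_PER_CPU workers, give each its own non-blocking
 * event pipe and crypto context, pin it to a CPU and detach it.
 */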
static void thread_spawn_or_panic(unsigned int cpus, int efd, int refd,
				  int tunfd, int ipv4, int udp)
{
	int i, ret;
	cpu_set_t cpuset;
	unsigned int threads;

	threads = cpus * THREADS_PER_CPU;

	for (i = 0; i < threads; ++i) {
		CPU_ZERO(&cpuset);
		threadpool[i].cpu = i % cpus;
		CPU_SET(threadpool[i].cpu, &cpuset);

		ret = pipe2(threadpool[i].efd, O_NONBLOCK);
		if (ret < 0)
			syslog_panic("Cannot create event socket!\n");

		threadpool[i].c = xmalloc_aligned(sizeof(*threadpool[i].c), 64);
		threadpool[i].parent.efd = efd;
		threadpool[i].parent.refd = refd;
		threadpool[i].parent.tunfd = tunfd;
		threadpool[i].parent.ipv4 = ipv4;
		threadpool[i].parent.udp = udp;
		threadpool[i].handler = udp ? handler_udp : handler_tcp;

		ret = pthread_create(&threadpool[i].trid, NULL,
				     worker, &threadpool[i]);
		if (ret < 0)
			syslog_panic("Thread creation failed!\n");

		ret = pthread_setaffinity_np(threadpool[i].trid,
					     sizeof(cpuset), &cpuset);
		if (ret < 0)
			syslog_panic("Thread CPU migration failed!\n");

		pthread_detach(threadpool[i].trid);
	}

	sleep(1);
}

static void thread_finish(unsigned int cpus)
{
	int i;
	unsigned int threads;

	threads = cpus * THREADS_PER_CPU;

	for (i = 0; i < threads; ++i) {
		while (pthread_join(threadpool[i].trid, NULL) < 0)
			;

		close(threadpool[i].efd[0]);
		close(threadpool[i].efd[1]);
	}
}
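
/*
 * Server entry point: resolve and bind the listening socket (TCP or UDP,
 * IPv4/IPv6), open the tun device, set up epoll plus the parent event and
 * retrigger pipes, spawn the worker threads and then dispatch ready file
 * descriptors to them until SIGINT.
 */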
int server_main(char *home, char *dev, char *port, int udp, int ipv4, int log)
{
	int lfd = -1, kdpfd, nfds, nfd, curfds, efd[2], refd[2], tunfd, i, mtu;
	unsigned int cpus = 0, threads, udp_cpu = 0;
	ssize_t ret;
	struct epoll_event ev, *events;
	struct addrinfo hints, *ahead, *ai;

	auth_log = !!log;
	openlog("curvetun", LOG_PID | LOG_CONS | LOG_NDELAY, LOG_DAEMON);
	syslog(LOG_INFO, "curvetun server booting!\n");
	if (!auth_log)
		syslog(LOG_INFO, "curvetun user logging disabled!\n");

	parse_userfile_and_generate_user_store_or_die(home);

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = PF_UNSPEC;
	hints.ai_socktype = udp ? SOCK_DGRAM : SOCK_STREAM;
	hints.ai_protocol = udp ? IPPROTO_UDP : IPPROTO_TCP;
	hints.ai_flags = AI_PASSIVE;

	ret = getaddrinfo(NULL, port, &hints, &ahead);
	if (ret < 0)
		syslog_panic("Cannot get address info!\n");

	for (ai = ahead; ai != NULL && lfd < 0; ai = ai->ai_next) {
		lfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
		if (lfd < 0)
			continue;
		if (ai->ai_family == AF_INET6) {
			int one = 1;
#ifdef IPV6_V6ONLY
			ret = setsockopt(lfd, IPPROTO_IPV6, IPV6_V6ONLY,
					 &one, sizeof(one));
			if (ret < 0) {
				close(lfd);
				lfd = -1;
				continue;
			}
#else
			close(lfd);
			lfd = -1;
			continue;
#endif /* IPV6_V6ONLY */
		}

		set_reuseaddr(lfd);

		mtu = IP_PMTUDISC_DONT;
		setsockopt(lfd, SOL_IP, IP_MTU_DISCOVER, &mtu, sizeof(mtu));

		ret = bind(lfd, ai->ai_addr, ai->ai_addrlen);
		if (ret < 0) {
			close(lfd);
			lfd = -1;
			continue;
		}

		if (!udp) {
			ret = listen(lfd, 5);
			if (ret < 0) {
				close(lfd);
				lfd = -1;
				continue;
			}
		}

		if (ipv4 == -1) {
			ipv4 = (ai->ai_family == AF_INET6 ? 0 :
				(ai->ai_family == AF_INET ? 1 : -1));
		}

		if (auth_log) {
			syslog(LOG_INFO, "curvetun on IPv%d via %s on port %s!\n",
			       ai->ai_family == AF_INET ? 4 : 6,
			       udp ? "UDP" : "TCP", port);
			syslog(LOG_INFO, "Allowed overlay proto is IPv%d!\n",
			       ipv4 ? 4 : 6);
		}
	}

	freeaddrinfo(ahead);

	if (lfd < 0 || ipv4 < 0)
		syslog_panic("Cannot create socket!\n");
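
	/* Tunnel device, parent pipes and the epoll instance that drive the
	 * main dispatch loop below. */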
	tunfd = tun_open_or_die(dev ? dev : DEVNAME_SERVER, IFF_TUN | IFF_NO_PI);

	ret = pipe2(efd, O_NONBLOCK);
	if (ret < 0)
		syslog_panic("Cannot create parent event fd!\n");

	ret = pipe2(refd, O_NONBLOCK);
	if (ret < 0)
		syslog_panic("Cannot create parent (r)event fd!\n");

	set_nonblocking(lfd);

	events = xzmalloc(MAX_EPOLL_SIZE * sizeof(*events));
	for (i = 0; i < MAX_EPOLL_SIZE; ++i)
		events[i].data.fd = -1;

	kdpfd = epoll_create(MAX_EPOLL_SIZE);
	if (kdpfd < 0)
		syslog_panic("Cannot create epoll instance!\n");

	memset(&ev, 0, sizeof(ev));
	ev.events = udp ? EPOLLIN | EPOLLET | EPOLLONESHOT : EPOLLIN;
	ev.data.fd = lfd;

	ret = epoll_ctl(kdpfd, EPOLL_CTL_ADD, lfd, &ev);
	if (ret < 0)
		syslog_panic("Cannot add socket for epoll!\n");

	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;
	ev.data.fd = efd[0];

	ret = epoll_ctl(kdpfd, EPOLL_CTL_ADD, efd[0], &ev);
	if (ret < 0)
		syslog_panic("Cannot add socket for events!\n");

	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;
	ev.data.fd = refd[0];

	ret = epoll_ctl(kdpfd, EPOLL_CTL_ADD, refd[0], &ev);
	if (ret < 0)
		syslog_panic("Cannot add socket for (r)events!\n");

	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN | EPOLLET | EPOLLONESHOT;
	ev.data.fd = tunfd;

	ret = epoll_ctl(kdpfd, EPOLL_CTL_ADD, tunfd, &ev);
	if (ret < 0)
		syslog_panic("Cannot add socket for tundev!\n");

	curfds = 4;

	trie_init();

	cpus = get_number_cpus_online();
	threads = cpus * THREADS_PER_CPU;
	if (!((threads != 0) && ((threads & (threads - 1)) == 0)))
		syslog_panic("Thread number not power of two!\n");

	threadpool = xzmalloc(sizeof(*threadpool) * threads);

	thread_spawn_or_panic(cpus, efd[1], refd[1], tunfd, ipv4, udp);

	init_cpusched(threads);

	register_socket(tunfd);
	register_socket(lfd);

	syslog(LOG_INFO, "curvetun up and running!\n");
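
	/*
	 * Main dispatch loop: a ready descriptor is either the TCP listener
	 * (accept new clients), the retrigger pipe refd[0] (re-arm a client
	 * fd in epoll), the event pipe efd[0] (a worker asked us to tear a
	 * client down), or a client/tun fd that gets handed to a worker
	 * through its per-thread pipe.
	 */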
	while (likely(!sigint)) {
		nfds = epoll_wait(kdpfd, events, curfds, -1);
		if (nfds < 0) {
			syslog(LOG_ERR, "epoll_wait error: %s\n",
			       strerror(errno));
			break;
		}

		for (i = 0; i < nfds; ++i) {
			if (unlikely(events[i].data.fd < 0))
				continue;

			if (events[i].data.fd == lfd && !udp) {
				int one, ncpu;
				char hbuff[256], sbuff[256];
				struct sockaddr_storage taddr;
				socklen_t tlen;

				tlen = sizeof(taddr);
				nfd = accept(lfd, (struct sockaddr *) &taddr,
					     &tlen);
				if (nfd < 0) {
					syslog(LOG_ERR, "accept error: %s\n",
					       strerror(errno));
					continue;
				}

				if (curfds + 1 > MAX_EPOLL_SIZE) {
					close(nfd);
					continue;
				}

				curfds++;

				ncpu = register_socket(nfd);

				memset(hbuff, 0, sizeof(hbuff));
				memset(sbuff, 0, sizeof(sbuff));
				getnameinfo((struct sockaddr *) &taddr, tlen,
					    hbuff, sizeof(hbuff),
					    sbuff, sizeof(sbuff),
					    NI_NUMERICHOST | NI_NUMERICSERV);

				if (auth_log) {
					syslog(LOG_INFO, "New connection from %s:%s "
					       "with id %d on CPU%d, %d active!\n",
					       hbuff, sbuff, nfd, ncpu, curfds);
				}

				set_nonblocking(nfd);

				one = 1;
				setsockopt(nfd, SOL_SOCKET, SO_KEEPALIVE,
					   &one, sizeof(one));

				one = 1;
				setsockopt(nfd, IPPROTO_TCP, TCP_NODELAY,
					   &one, sizeof(one));

				memset(&ev, 0, sizeof(ev));

				ev.events = EPOLLIN | EPOLLET | EPOLLONESHOT;
				ev.data.fd = nfd;

				ret = epoll_ctl(kdpfd, EPOLL_CTL_ADD, nfd, &ev);
				if (ret < 0) {
					syslog(LOG_ERR, "Epoll ctl add error "
					       "on id %d: %s\n", nfd,
					       strerror(errno));
					close(nfd);
					curfds--;
				}

				continue;
			} else if (events[i].data.fd == refd[0]) {
				int fd_one;

				ret = read_exact(refd[0], &fd_one, sizeof(fd_one), 1);
				if (ret != sizeof(fd_one) || fd_one <= 0)
					continue;

				memset(&ev, 0, sizeof(ev));
				ev.events = EPOLLIN | EPOLLET | EPOLLONESHOT;
				ev.data.fd = fd_one;

				ret = epoll_ctl(kdpfd, EPOLL_CTL_MOD, fd_one, &ev);
				if (ret < 0) {
					syslog(LOG_ERR, "Epoll ctl mod "
					       "error on id %d: %s\n",
					       fd_one, strerror(errno));
					close(fd_one);
				}

				continue;
			} else if (events[i].data.fd == efd[0]) {
				int fd_del, test;

				ret = read_exact(efd[0], &fd_del, sizeof(fd_del), 1);
				if (ret != sizeof(fd_del) || fd_del <= 0)
					continue;

				ret = read(fd_del, &test, sizeof(test));
				if (ret < 0 && errno == EBADF)
					continue;

				ret = epoll_ctl(kdpfd, EPOLL_CTL_DEL, fd_del, &ev);
				if (ret < 0) {
					syslog(LOG_ERR, "Epoll ctl del "
					       "error on id %d: %s\n",
					       fd_del, strerror(errno));
					close(fd_del);
					continue;
				}

				close(fd_del);

				curfds--;

				unregister_socket(fd_del);

				if (auth_log) {
					syslog(LOG_INFO, "Closed connection with "
					       "id %d, %d active!\n",
					       fd_del, curfds);
				}
			} else {
				int cpu, fd_work = events[i].data.fd;

				if (!udp)
					cpu = socket_to_cpu(fd_work);
				else
					udp_cpu = (udp_cpu + 1) & (threads - 1);

				ret = write_exact(threadpool[udp ? udp_cpu : cpu].efd[1],
						  &fd_work, sizeof(fd_work), 1);
				if (ret != sizeof(fd_work))
					syslog(LOG_ERR, "Write error on event "
					       "dispatch: %s\n", strerror(errno));
			}
		}
	}

	syslog(LOG_INFO, "curvetun prepare shut down!\n");

	close(lfd);
	close(efd[0]);
	close(efd[1]);
	close(refd[0]);
	close(refd[1]);
	close(tunfd);

	thread_finish(cpus);

	xfree(threadpool);
	xfree(events);

	unregister_socket(lfd);
	unregister_socket(tunfd);

	destroy_cpusched();

	trie_cleanup();

	destroy_user_store();

	syslog(LOG_INFO, "curvetun shut down!\n");
	closelog();

	return 0;
}