Print a warning if an illegal value is used for the spi but continue
[vpnc.git] / tunip.c
blob00d9e1c49378da67e74cfe1a716d19224aa3b77d
1 /* IPSec ESP and AH support.
2 Copyright (c) 1999 Pierre Beyssac
3 Copyright (C) 2002 Geoffrey Keating
4 Copyright (C) 2003-2007 Maurice Massar
5 Copyright (C) 2004 Tomas Mraz
6 Copyright (C) 2005 Michael Tilstra
7 Copyright (C) 2006 Daniel Roethlisberger
8 Copyright (C) 2007 Paolo Zarpellon (tap+Cygwin support)
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 $Id$
27 /* borrowed from pipsecd (-; */
29 /*-
30 * Copyright (c) 1999 Pierre Beyssac
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
56 #include <sys/types.h>
57 #include <sys/socket.h>
58 #include <errno.h>
59 #include <assert.h>
60 #include <unistd.h>
61 #include <fcntl.h>
62 #include <stdio.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
66 #ifndef __SKYOS__
67 #include <netinet/ip_icmp.h>
68 #endif
69 #include <arpa/inet.h>
70 #include <stdlib.h>
71 #include <string.h>
72 #include <strings.h>
73 #include <syslog.h>
74 #include <time.h>
75 #include <sys/select.h>
76 #include <signal.h>
78 #ifdef __CYGWIN__
79 #include <pthread.h>
80 #endif
82 #if !defined(__sun__) && !defined(__SKYOS__)
83 #include <err.h>
84 #endif
86 #include <gcrypt.h>
87 #include "sysdep.h"
88 #include "config.h"
89 #include "vpnc.h"
91 #include "tunip.h"
/* Fallback helper macros for platforms that do not provide them. */

#ifndef MAX
/* NOTE: evaluates its arguments twice -- do not pass expressions with
 * side effects. */
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

#ifndef FD_COPY
/* Duplicate an fd_set; BSD has this, most other libcs do not. */
#define FD_COPY(f, t) ((void)memcpy((t), (f), sizeof(*(f))))
#endif
/* A real ESP header (RFC 2406): 8 bytes of fixed header, followed by
 * the variable-length payload (IV + ciphertext + padding), the trailing
 * next_header byte and the optional authentication data. */
typedef struct esp_encap_header {
	uint32_t spi;		/* security parameters index */
	uint32_t seq_id;	/* sequence id (unimplemented) */
	/* variable-length payload data + padding */
	/* unsigned char next_header */
	/* optional auth data */
} __attribute__((packed)) esp_encap_header_t;
/* Dispatch table for one encapsulation flavour (raw IP/ESP vs. UDP).
 * recv reads one packet from the wire into buf; send_peer encapsulates
 * and transmits; recv_peer authenticates/decrypts the packet currently
 * held in s->ipsec.rx. */
struct encap_method {
	int fixed_header_size;

	int (*recv) (struct sa_block *s, unsigned char *buf, unsigned int bufsize);
	void (*send_peer) (struct sa_block *s, unsigned char *buf, unsigned int bufsize);
	int (*recv_peer) (struct sa_block *s);
};
/* Yuck! Global variables... */

#define MAX_HEADER 72		/* worst-case encapsulation overhead */
#define MAX_PACKET 4096
/* Set by the signal handler / peer events; the main loop exits when
 * this becomes non-zero. */
int volatile do_kill;
/* One receive and one transmit buffer, each with room for the
 * encapsulation header and (in TAP mode) an ethernet header. */
static uint8_t global_buffer_rx[MAX_HEADER + MAX_PACKET + ETH_HLEN];
static uint8_t global_buffer_tx[MAX_HEADER + MAX_PACKET + ETH_HLEN];
/*
 * in_cksum --
 *	Checksum routine for Internet Protocol family headers (C Version).
 *
 * Sums sequential 16-bit words into a 32-bit accumulator, folds the
 * carries from the top 16 bits back into the low 16 bits, and returns
 * the ones-complement of the result.
 *
 * Improvements over the classic BSD version: fixed-width types instead
 * of u_short/u_char (uint16_t is the same type as u_short on all
 * supported platforms, so callers are unaffected), an unsigned
 * accumulator (the fold relies on modular arithmetic), and the
 * long-deprecated `register` keyword dropped.
 */
static uint16_t in_cksum(uint16_t *addr, int len)
{
	int nleft = len;
	uint16_t *w = addr;
	uint32_t sum = 0;
	uint16_t answer = 0;

	while (nleft > 1) {
		sum += *w++;
		nleft -= 2;
	}

	/* mop up an odd byte, if necessary */
	if (nleft == 1) {
		*(uint8_t *) (&answer) = *(uint8_t *) w;
		sum += answer;
	}

	/* add back carry outs from top 16 bits to low 16 bits */
	sum = (sum >> 16) + (sum & 0xffff);	/* add hi 16 to low 16 */
	sum += (sum >> 16);			/* add carry */
	answer = ~sum;				/* truncate to 16 bits */
	return answer;
}
161 * Decapsulate from a raw IP packet
163 static int encap_rawip_recv(struct sa_block *s, unsigned char *buf, unsigned int bufsize)
165 ssize_t r;
166 struct ip *p = (struct ip *)buf;
167 struct sockaddr_in from;
168 socklen_t fromlen = sizeof(from);
170 r = recvfrom(s->esp_fd, buf, bufsize, 0, (struct sockaddr *)&from, &fromlen);
171 if (r == -1) {
172 logmsg(LOG_ERR, "recvfrom: %m");
173 return -1;
175 if (from.sin_addr.s_addr != s->dst.s_addr) {
176 logmsg(LOG_ALERT, "packet from unknown host %s", inet_ntoa(from.sin_addr));
177 return -1;
179 if (r < (p->ip_hl << 2) + s->ipsec.em->fixed_header_size) {
180 logmsg(LOG_ALERT, "packet too short. got %zd, expected %d", r, (p->ip_hl << 2) + s->ipsec.em->fixed_header_size);
181 return -1;
184 #ifdef NEED_IPLEN_FIX
185 p->ip_len = r;
186 #else
187 p->ip_len = ntohs(r);
188 #endif
190 s->ipsec.rx.buf = buf;
191 s->ipsec.rx.buflen = r;
192 s->ipsec.rx.bufpayload = (p->ip_hl << 2);
193 s->ipsec.rx.bufsize = bufsize;
194 return r;
198 * Decapsulate from an UDP packet
200 static int encap_udp_recv(struct sa_block *s, unsigned char *buf, unsigned int bufsize)
202 ssize_t r;
204 r = recv(s->esp_fd, buf, bufsize, 0);
205 if (r == -1) {
206 logmsg(LOG_ERR, "recvfrom: %m");
207 return -1;
209 if (s->ipsec.natt_active_mode == NATT_ACTIVE_DRAFT_OLD && r > 8) {
210 r -= 8;
211 memmove(buf, buf + 8, r);
213 if( r == 1 && *buf == 0xff )
215 DEBUGTOP(1, printf("UDP NAT keepalive packet received\n"));
216 return -1;
218 if (r < s->ipsec.em->fixed_header_size) {
219 logmsg(LOG_ALERT, "packet too short from %s. got %zd, expected %d",
220 inet_ntoa(s->dst), r, s->ipsec.em->fixed_header_size);
221 return -1;
224 s->ipsec.rx.buf = buf;
225 s->ipsec.rx.buflen = r;
226 s->ipsec.rx.bufpayload = 0;
227 s->ipsec.rx.bufsize = bufsize;
228 return r;
232 * Decapsulate packet
234 static int encap_any_decap(struct sa_block *s)
236 s->ipsec.rx.buflen -= s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size;
237 s->ipsec.rx.buf += s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size;
238 if (s->ipsec.rx.buflen == 0)
239 return 0;
240 return 1;
244 * Send decapsulated packet to tunnel device
246 static int tun_send_ip(struct sa_block *s)
248 int sent, len;
249 uint8_t *start;
251 start = s->ipsec.rx.buf;
252 len = s->ipsec.rx.buflen;
254 if (opt_if_mode == IF_MODE_TAP) {
255 #ifndef __sun__
257 * Add ethernet header before s->ipsec.rx.buf where
258 * at least ETH_HLEN bytes should be available.
260 struct ether_header *eth_hdr = (struct ether_header *) (s->ipsec.rx.buf - ETH_HLEN);
262 memcpy(eth_hdr->ether_dhost, s->tun_hwaddr, ETH_ALEN);
263 memcpy(eth_hdr->ether_shost, s->tun_hwaddr, ETH_ALEN);
265 /* Use a different MAC as source */
266 eth_hdr->ether_shost[0] ^= 0x80; /* toggle some visible bit */
267 eth_hdr->ether_type = htons(ETHERTYPE_IP);
269 start = (uint8_t *) eth_hdr;
270 len += ETH_HLEN;
271 #endif
274 sent = tun_write(s->tun_fd, start, len);
275 if (sent != len)
276 logmsg(LOG_ERR, "truncated in: %d -> %d\n", len, sent);
277 hex_dump("Tx pkt", start, len, NULL);
278 return 1;
282 * Compute HMAC for an arbitrary stream of bytes
284 static int hmac_compute(int md_algo,
285 const unsigned char *data, unsigned int data_size,
286 unsigned char *digest, unsigned char do_store,
287 const unsigned char *secret, unsigned short secret_size)
289 gcry_md_hd_t md_ctx;
290 int ret;
291 unsigned char *hmac_digest;
292 unsigned int hmac_len;
294 /* See RFC 2104 */
295 gcry_md_open(&md_ctx, md_algo, GCRY_MD_FLAG_HMAC);
296 assert(md_ctx != NULL);
297 ret = gcry_md_setkey(md_ctx, secret, secret_size);
298 assert(ret == 0);
299 gcry_md_write(md_ctx, data, data_size);
300 gcry_md_final(md_ctx);
301 hmac_digest = gcry_md_read(md_ctx, 0);
302 hmac_len = 12; /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
304 if (do_store) {
305 memcpy(digest, hmac_digest, hmac_len);
306 ret = 0;
307 } else
308 ret = memcmp(digest, hmac_digest, hmac_len);
310 gcry_md_close(md_ctx);
311 return ret;
315 * Encapsulate a packet in ESP
317 static void encap_esp_encapsulate(struct sa_block *s)
319 esp_encap_header_t *eh;
320 unsigned char *iv, *cleartext;
321 size_t i, padding, pad_blksz;
322 unsigned int cleartextlen;
325 * Add padding as necessary
327 * done: this should be checked, RFC 2406 section 2.4 is quite
328 * obscure on that point.
329 * seems fine
331 pad_blksz = s->ipsec.blk_len;
332 while (pad_blksz & 3) /* must be multiple of 4 */
333 pad_blksz <<= 1;
334 padding = pad_blksz - ((s->ipsec.tx.buflen + 2 - s->ipsec.tx.var_header_size - s->ipsec.tx.bufpayload) % pad_blksz);
335 DEBUG(3, printf("sending packet: len = %d, padding = %lu\n", s->ipsec.tx.buflen, (unsigned long)padding));
336 if (padding == pad_blksz)
337 padding = 0;
339 for (i = 1; i <= padding; i++) {
340 s->ipsec.tx.buf[s->ipsec.tx.buflen] = i;
341 s->ipsec.tx.buflen++;
344 /* Add trailing padlen and next_header */
345 s->ipsec.tx.buf[s->ipsec.tx.buflen++] = padding;
346 s->ipsec.tx.buf[s->ipsec.tx.buflen++] = IPPROTO_IPIP;
348 cleartext = s->ipsec.tx.buf + s->ipsec.tx.var_header_size + s->ipsec.tx.bufpayload;
349 cleartextlen = s->ipsec.tx.buflen - s->ipsec.tx.var_header_size - s->ipsec.tx.bufpayload;
351 eh = (esp_encap_header_t *) (s->ipsec.tx.buf + s->ipsec.tx.bufpayload);
352 eh->spi = s->ipsec.tx.spi;
353 eh->seq_id = htonl(s->ipsec.tx.seq_id++);
355 /* Copy initialization vector in packet */
356 iv = (unsigned char *)(eh + 1);
357 gcry_create_nonce(iv, s->ipsec.iv_len);
358 hex_dump("iv", iv, s->ipsec.iv_len, NULL);
360 hex_dump("sending ESP packet (before crypt)", s->ipsec.tx.buf, s->ipsec.tx.buflen, NULL);
362 if (s->ipsec.cry_algo) {
363 gcry_cipher_setiv(s->ipsec.tx.cry_ctx, iv, s->ipsec.iv_len);
364 gcry_cipher_encrypt(s->ipsec.tx.cry_ctx, cleartext, cleartextlen, NULL, 0);
367 hex_dump("sending ESP packet (after crypt)", s->ipsec.tx.buf, s->ipsec.tx.buflen, NULL);
369 /* Handle optional authentication field */
370 if (s->ipsec.md_algo) {
371 hmac_compute(s->ipsec.md_algo,
372 s->ipsec.tx.buf + s->ipsec.tx.bufpayload,
373 s->ipsec.tx.var_header_size + cleartextlen,
374 s->ipsec.tx.buf + s->ipsec.tx.bufpayload
375 + s->ipsec.tx.var_header_size + cleartextlen,
376 1, s->ipsec.tx.key_md, s->ipsec.md_len);
377 s->ipsec.tx.buflen += 12; /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
378 hex_dump("sending ESP packet (after ah)", s->ipsec.tx.buf, s->ipsec.tx.buflen, NULL);
383 * Encapsulate a packet in IP ESP and send to the peer.
384 * "buf" should have exactly MAX_HEADER free bytes at its beginning
385 * to account for encapsulation data (not counted in "size").
387 static void encap_esp_send_peer(struct sa_block *s, unsigned char *buf, unsigned int bufsize)
389 ssize_t sent;
390 struct ip *tip, ip;
391 struct sockaddr_in dstaddr;
393 buf += MAX_HEADER;
395 /* Keep a pointer to the old IP header */
396 tip = (struct ip *)buf;
398 s->ipsec.tx.buf = buf;
399 s->ipsec.tx.buflen = bufsize;
401 /* Prepend our encapsulation header and new IP header */
402 s->ipsec.tx.var_header_size = (s->ipsec.em->fixed_header_size + s->ipsec.iv_len);
404 s->ipsec.tx.buf -= sizeof(struct ip) + s->ipsec.tx.var_header_size;
405 s->ipsec.tx.buflen += sizeof(struct ip) + s->ipsec.tx.var_header_size;
407 s->ipsec.tx.bufpayload = sizeof(struct ip);
409 /* Fill non-mutable fields */
410 ip.ip_v = IPVERSION;
411 ip.ip_hl = 5;
412 /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
413 ip.ip_id = htons(s->ipsec.ip_id++);
414 ip.ip_p = IPPROTO_ESP;
415 ip.ip_src = s->src;
416 ip.ip_dst = s->dst;
418 /* Fill mutable fields */
419 ip.ip_tos = (bufsize < sizeof(struct ip)) ? 0 : tip->ip_tos;
420 ip.ip_off = 0;
421 ip.ip_ttl = IPDEFTTL;
422 ip.ip_sum = 0;
424 encap_esp_encapsulate(s);
426 ip.ip_len = s->ipsec.tx.buflen;
427 #ifdef NEED_IPLEN_FIX
428 ip.ip_len = htons(ip.ip_len);
429 #endif
430 ip.ip_sum = in_cksum((u_short *) s->ipsec.tx.buf, sizeof(struct ip));
432 memcpy(s->ipsec.tx.buf, &ip, sizeof ip);
434 dstaddr.sin_family = AF_INET;
435 dstaddr.sin_addr = s->dst;
436 dstaddr.sin_port = 0;
437 sent = sendto(s->esp_fd, s->ipsec.tx.buf, s->ipsec.tx.buflen, 0, (struct sockaddr *)&dstaddr, sizeof(struct sockaddr_in));
438 if (sent == -1) {
439 logmsg(LOG_ERR, "esp sendto: %m");
440 return;
442 if (sent != s->ipsec.tx.buflen)
443 logmsg(LOG_ALERT, "esp truncated out (%lld out of %d)", (long long)sent, s->ipsec.tx.buflen);
447 * Encapsulate a packet in UDP ESP and send to the peer.
448 * "buf" should have exactly MAX_HEADER free bytes at its beginning
449 * to account for encapsulation data (not counted in "size").
451 static void encap_udp_send_peer(struct sa_block *s, unsigned char *buf, unsigned int bufsize)
453 ssize_t sent;
455 buf += MAX_HEADER;
457 s->ipsec.tx.buf = buf;
458 s->ipsec.tx.buflen = bufsize;
460 /* Prepend our encapsulation header and new IP header */
461 s->ipsec.tx.var_header_size = (s->ipsec.em->fixed_header_size + s->ipsec.iv_len);
463 s->ipsec.tx.buf -= s->ipsec.tx.var_header_size;
464 s->ipsec.tx.buflen += s->ipsec.tx.var_header_size;
466 s->ipsec.tx.bufpayload = 0;
468 encap_esp_encapsulate(s);
470 if (s->ipsec.natt_active_mode == NATT_ACTIVE_DRAFT_OLD) {
471 s->ipsec.tx.buf -= 8;
472 s->ipsec.tx.buflen += 8;
473 memset(s->ipsec.tx.buf, 0, 8);
476 sent = send(s->esp_fd, s->ipsec.tx.buf, s->ipsec.tx.buflen, 0);
477 if (sent == -1) {
478 logmsg(LOG_ERR, "udp sendto: %m");
479 return;
481 if (sent != s->ipsec.tx.buflen)
482 logmsg(LOG_ALERT, "udp truncated out (%lld out of %d)",
483 (long long)sent, s->ipsec.tx.buflen);
486 static int encap_esp_recv_peer(struct sa_block *s)
488 int len, i;
489 size_t blksz;
490 unsigned char padlen, next_header;
491 unsigned char *pad;
492 unsigned char *iv;
494 s->ipsec.rx.var_header_size = s->ipsec.iv_len;
495 iv = s->ipsec.rx.buf + s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size;
497 len = s->ipsec.rx.buflen - s->ipsec.rx.bufpayload - s->ipsec.em->fixed_header_size - s->ipsec.rx.var_header_size;
499 if (len < 0) {
500 logmsg(LOG_ALERT, "Packet too short");
501 return -1;
504 /* Handle optional authentication field */
505 if (s->ipsec.md_algo) {
506 len -= 12; /*gcry_md_get_algo_dlen(peer->local_sa->md_algo); */
507 s->ipsec.rx.buflen -= 12;
508 if (hmac_compute(s->ipsec.md_algo,
509 s->ipsec.rx.buf + s->ipsec.rx.bufpayload,
510 s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len,
511 s->ipsec.rx.buf + s->ipsec.rx.bufpayload
512 + s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len,
514 s->ipsec.rx.key_md,
515 s->ipsec.md_len) != 0) {
516 logmsg(LOG_ALERT, "HMAC mismatch in ESP mode");
517 return -1;
521 blksz = s->ipsec.blk_len;
522 if (s->ipsec.cry_algo && ((len % blksz) != 0)) {
523 logmsg(LOG_ALERT,
524 "payload len %d not a multiple of algorithm block size %lu", len,
525 (unsigned long)blksz);
526 return -1;
529 hex_dump("receiving ESP packet (before decrypt)",
530 &s->ipsec.rx.buf[s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size +
531 s->ipsec.rx.var_header_size], len, NULL);
533 if (s->ipsec.cry_algo) {
534 unsigned char *data;
536 data = (s->ipsec.rx.buf + s->ipsec.rx.bufpayload
537 + s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size);
538 gcry_cipher_setiv(s->ipsec.rx.cry_ctx, iv, s->ipsec.iv_len);
539 gcry_cipher_decrypt(s->ipsec.rx.cry_ctx, data, len, NULL, 0);
542 hex_dump("receiving ESP packet (after decrypt)",
543 &s->ipsec.rx.buf[s->ipsec.rx.bufpayload + s->ipsec.em->fixed_header_size +
544 s->ipsec.rx.var_header_size], len, NULL);
546 padlen = s->ipsec.rx.buf[s->ipsec.rx.bufpayload
547 + s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len - 2];
548 next_header = s->ipsec.rx.buf[s->ipsec.rx.bufpayload
549 + s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len - 1];
551 if (padlen + 2 > len) {
552 logmsg(LOG_ALERT, "Inconsistent padlen");
553 return -1;
555 if (next_header != IPPROTO_IPIP) {
556 logmsg(LOG_ALERT, "Inconsistent next_header %d", next_header);
557 return -1;
559 DEBUG(3, printf("pad len: %d, next_header: %d\n", padlen, next_header));
561 len -= padlen + 2;
562 s->ipsec.rx.buflen -= padlen + 2;
564 /* Check padding */
565 pad = s->ipsec.rx.buf + s->ipsec.rx.bufpayload
566 + s->ipsec.em->fixed_header_size + s->ipsec.rx.var_header_size + len;
567 for (i = 1; i <= padlen; i++) {
568 if (*pad != i) {
569 logmsg(LOG_ALERT, "Bad padding");
570 return -1;
572 pad++;
575 return 0;
578 static void encap_esp_new(struct encap_method *encap)
580 encap->recv = encap_rawip_recv;
581 encap->send_peer = encap_esp_send_peer;
582 encap->recv_peer = encap_esp_recv_peer;
583 encap->fixed_header_size = sizeof(esp_encap_header_t);
586 static void encap_udp_new(struct encap_method *encap)
588 encap->recv = encap_udp_recv;
589 encap->send_peer = encap_udp_send_peer;
590 encap->recv_peer = encap_esp_recv_peer;
591 encap->fixed_header_size = sizeof(esp_encap_header_t);
595 * Process ARP
596 * Return 1 if packet has been processed, 0 otherwise
598 static int process_arp(struct sa_block *s, uint8_t *frame)
600 #ifndef __sun__
601 int frame_size;
602 uint8_t tmp[4];
603 struct ether_header *eth = (struct ether_header *) frame;
604 struct ether_arp *arp = (struct ether_arp *) (frame + ETH_HLEN);
606 if (ntohs(eth->ether_type) != ETHERTYPE_ARP) {
607 return 0;
610 if (ntohs(arp->arp_hrd) != ARPHRD_ETHER ||
611 ntohs(arp->arp_pro) != 0x800 ||
612 arp->arp_hln != ETH_ALEN ||
613 arp->arp_pln != 4 ||
614 ntohs(arp->arp_op) != ARPOP_REQUEST ||
615 !memcmp(arp->arp_spa, arp->arp_tpa, 4) ||
616 memcmp(eth->ether_shost, s->tun_hwaddr, ETH_ALEN) ||
617 !memcmp(arp->arp_tpa, &s->our_address, 4)) {
618 /* whatever .. just drop it */
619 return 1;
622 /* send arp reply */
624 memcpy(eth->ether_dhost, s->tun_hwaddr, ETH_ALEN);
625 eth->ether_shost[0] ^= 0x80; /* Use a different MAC as source */
627 memcpy(tmp, arp->arp_spa, 4);
628 memcpy(arp->arp_spa, arp->arp_tpa, 4);
629 memcpy(arp->arp_tpa, tmp, 4);
631 memcpy(arp->arp_tha, s->tun_hwaddr, ETH_ALEN);
632 arp->arp_sha[0] ^= 0x80; /* Use a different MAC as source */
634 arp->arp_op = htons(ARPOP_REPLY);
636 frame_size = ETH_HLEN + sizeof(struct ether_arp);
637 tun_write(s->tun_fd, frame, frame_size);
638 hex_dump("ARP reply", frame, frame_size, NULL);
640 return 1;
641 #else
642 s = 0;
643 frame = 0;
644 return 0;
645 #endif
/*
 * Process non-IP packets
 * Return 1 if packet has been processed, 0 otherwise
 */
static int process_non_ip(uint8_t *frame)
{
	struct ether_header *eth = (struct ether_header *) frame;

	/* anything that is not IPv4 is simply dropped */
	return ntohs(eth->ether_type) != ETHERTYPE_IP;
}
664 static void process_tun(struct sa_block *s)
666 int pack;
667 int size = MAX_PACKET;
668 uint8_t *start = global_buffer_rx + MAX_HEADER;
670 if (opt_if_mode == IF_MODE_TAP) {
671 /* Make sure IP packet starts at buf + MAX_HEADER */
672 start -= ETH_HLEN;
673 size += ETH_HLEN;
676 /* Receive a packet from the tunnel interface */
677 pack = tun_read(s->tun_fd, start, size);
679 hex_dump("Rx pkt", start, pack, NULL);
681 if (opt_if_mode == IF_MODE_TAP) {
682 if (process_arp(s, start)) {
683 return;
685 if (process_non_ip(start)) {
686 return;
688 pack -= ETH_HLEN;
691 if (pack == -1) {
692 logmsg(LOG_ERR, "read: %m");
693 return;
696 /* Don't access the contents of the buffer other than byte aligned.
697 * 12: Offset of ip source address in ip header,
698 * 4: Length of IP address */
699 if (!memcmp(global_buffer_rx + MAX_HEADER + 12, &s->dst.s_addr, 4)) {
700 logmsg(LOG_ALERT, "routing loop to %s",
701 inet_ntoa(s->dst));
702 return;
705 /* Encapsulate and send to the other end of the tunnel */
706 s->ipsec.life.tx += pack;
707 s->ipsec.em->send_peer(s, global_buffer_rx, pack);
710 static void process_socket(struct sa_block *s)
712 /* Receive a packet from a socket */
713 int pack;
714 uint8_t *start = global_buffer_tx;
715 esp_encap_header_t *eh;
717 if (opt_if_mode == IF_MODE_TAP) {
718 start += ETH_HLEN;
721 pack = s->ipsec.em->recv(s, start, MAX_HEADER + MAX_PACKET);
722 if (pack == -1)
723 return;
725 eh = (esp_encap_header_t *) (s->ipsec.rx.buf + s->ipsec.rx.bufpayload);
726 if (eh->spi == 0) {
727 process_late_ike(s, s->ipsec.rx.buf + s->ipsec.rx.bufpayload + 4 /* SPI-size */,
728 s->ipsec.rx.buflen - s->ipsec.rx.bufpayload - 4);
729 return;
730 } else if (eh->spi != s->ipsec.rx.spi) {
731 logmsg(LOG_NOTICE, "unknown spi %#08x from peer", ntohl(eh->spi));
732 return;
733 } else if (ntohl(eh->spi) < 256) {
734 syslog(LOG_NOTICE, "illegal spi %d from peer - continuing", ntohl(eh->spi));
737 /* Check auth digest and/or decrypt */
738 if (s->ipsec.em->recv_peer(s) != 0)
739 return;
741 if (encap_any_decap(s) == 0) {
742 logmsg(LOG_DEBUG, "received update probe from peer");
743 } else {
744 /* Send the decapsulated packet to the tunnel interface */
745 s->ipsec.life.rx += s->ipsec.rx.buflen;
746 tun_send_ip(s);
#if defined(__CYGWIN__)
/* Cygwin cannot select() on the tun device, so tun reads run in their
 * own thread until do_kill is raised. */
static void *tun_thread (void *arg)
{
	struct sa_block *s = (struct sa_block *) arg;

	while (!do_kill)
		process_tun(s);

	return NULL;
}
#endif
762 static void vpnc_main_loop(struct sa_block *s)
764 fd_set rfds, refds;
765 int nfds=0;
766 int enable_keepalives;
767 int timed_mode;
768 ssize_t len;
769 struct timeval select_timeout;
770 struct timeval normal_timeout;
771 time_t next_ike_keepalive=0;
772 time_t next_ike_dpd=0;
773 #if defined(__CYGWIN__)
774 pthread_t tid;
775 #endif
777 /* non-esp marker, nat keepalive payload (0xFF) */
778 uint8_t keepalive_v2[5] = { 0x00, 0x00, 0x00, 0x00, 0xFF };
779 uint8_t keepalive_v1[1] = { 0xFF };
780 uint8_t *keepalive;
781 size_t keepalive_size;
783 if (s->ipsec.natt_active_mode == NATT_ACTIVE_DRAFT_OLD) {
784 keepalive = keepalive_v1;
785 keepalive_size = sizeof(keepalive_v1);
786 } else { /* active_mode is either RFC or CISCO_UDP */
787 keepalive = keepalive_v2;
788 keepalive_size = sizeof(keepalive_v2);
791 /* send keepalives if UDP encapsulation is enabled */
792 enable_keepalives = (s->ipsec.encap_mode != IPSEC_ENCAP_TUNNEL);
794 /* regular wakeups if keepalives on ike or dpd active */
795 timed_mode = ((enable_keepalives && s->ike_fd != s->esp_fd) || s->ike.do_dpd);
797 FD_ZERO(&rfds);
799 #if !defined(__CYGWIN__)
800 FD_SET(s->tun_fd, &rfds);
801 nfds = MAX(nfds, s->tun_fd +1);
802 #endif
804 FD_SET(s->esp_fd, &rfds);
805 nfds = MAX(nfds, s->esp_fd +1);
807 if (s->ike_fd != s->esp_fd) {
808 FD_SET(s->ike_fd, &rfds);
809 nfds = MAX(nfds, s->ike_fd +1);
812 #if defined(__CYGWIN__)
813 if (pthread_create(&tid, NULL, tun_thread, s)) {
814 logmsg(LOG_ERR, "Cannot create tun thread!\n");
815 return;
817 #endif
819 normal_timeout.tv_sec = 86400;
820 normal_timeout.tv_usec = 0;
822 if (s->ike.do_dpd) {
823 /* send initial dpd request */
824 next_ike_dpd = time(NULL) + s->ike.dpd_idle;
825 dpd_ike(s);
826 normal_timeout.tv_sec = s->ike.dpd_idle;
827 normal_timeout.tv_usec = 0;
830 if (enable_keepalives) {
831 normal_timeout.tv_sec = 9;
832 normal_timeout.tv_usec = 500000;
834 if (s->ike_fd != s->esp_fd) {
835 /* send initial nat ike keepalive packet */
836 next_ike_keepalive = time(NULL) + 9;
837 keepalive_ike(s);
841 select_timeout = normal_timeout;
843 while (!do_kill) {
844 int presult;
846 do {
847 struct timeval *tvp = NULL;
848 FD_COPY(&rfds, &refds);
849 if (s->ike.do_dpd || enable_keepalives)
850 tvp = &select_timeout;
851 presult = select(nfds, &refds, NULL, NULL, tvp);
852 if (presult == 0 && (s->ike.do_dpd || enable_keepalives)) {
853 /* reset to max timeout */
854 select_timeout = normal_timeout;
855 if (enable_keepalives) {
856 if (s->ike_fd != s->esp_fd) {
857 /* send nat ike keepalive packet */
858 next_ike_keepalive = time(NULL) + 9;
859 keepalive_ike(s);
861 /* send nat keepalive packet */
862 if (send(s->esp_fd, keepalive, keepalive_size, 0) == -1) {
863 logmsg(LOG_ERR, "keepalive sendto: %m");
866 if (s->ike.do_dpd) {
867 time_t now = time(NULL);
868 if (s->ike.dpd_seqno != s->ike.dpd_seqno_ack) {
869 /* Wake up more often for dpd attempts */
870 select_timeout.tv_sec = 5;
871 select_timeout.tv_usec = 0;
872 dpd_ike(s);
873 next_ike_dpd = now + s->ike.dpd_idle;
875 else if (now >= next_ike_dpd) {
876 dpd_ike(s);
877 next_ike_dpd = now + s->ike.dpd_idle;
881 DEBUG(2,printf("lifetime status: %ld of %u seconds used, %u|%u of %u kbytes used\n",
882 time(NULL) - s->ipsec.life.start,
883 s->ipsec.life.seconds,
884 s->ipsec.life.rx/1024,
885 s->ipsec.life.tx/1024,
886 s->ipsec.life.kbytes));
887 } while ((presult == 0 || (presult == -1 && errno == EINTR)) && !do_kill);
888 if (presult == -1) {
889 logmsg(LOG_ERR, "select: %m");
890 continue;
893 #if !defined(__CYGWIN__)
894 if (FD_ISSET(s->tun_fd, &refds)) {
895 process_tun(s);
897 #endif
899 if (FD_ISSET(s->esp_fd, &refds) ) {
900 process_socket(s);
903 if (s->ike_fd != s->esp_fd && FD_ISSET(s->ike_fd, &refds) ) {
904 DEBUG(3,printf("received something on ike fd..\n"));
905 len = recv(s->ike_fd, global_buffer_tx, MAX_HEADER + MAX_PACKET, 0);
906 process_late_ike(s, global_buffer_tx, len);
909 if (timed_mode) {
910 time_t now = time(NULL);
911 time_t next_up = now + 86400;
912 if (enable_keepalives) {
913 /* never wait more than 9 seconds for a UDP keepalive */
914 next_up = now + 9;
915 if (s->ike_fd != s->esp_fd) {
916 if (now >= next_ike_keepalive) {
917 /* send nat ike keepalive packet now */
918 next_ike_keepalive = now + 9;
919 keepalive_ike(s);
920 select_timeout = normal_timeout;
922 if (next_ike_keepalive < next_up)
923 next_up = next_ike_keepalive;
926 if (s->ike.do_dpd) {
927 if (s->ike.dpd_seqno != s->ike.dpd_seqno_ack) {
928 dpd_ike(s);
929 next_ike_dpd = now + s->ike.dpd_idle;
930 if (now + 5 < next_up)
931 next_up = now + 5;
933 else if (now >= next_ike_dpd) {
934 dpd_ike(s);
935 next_ike_dpd = now + s->ike.dpd_idle;
937 if (next_ike_dpd < next_up)
938 next_up = next_ike_dpd;
940 /* Reduce timeout so next activity happens on schedule */
941 select_timeout.tv_sec = next_up - now;
942 select_timeout.tv_usec = 0;
947 switch (do_kill) {
948 case -2:
949 logmsg(LOG_NOTICE, "connection terminated by dead peer detection");
950 break;
951 case -1:
952 logmsg(LOG_NOTICE, "connection terminated by peer");
953 break;
954 default:
955 logmsg(LOG_NOTICE, "terminated by signal: %d", do_kill);
956 break;
960 static void killit(int signum)
962 do_kill = signum;
/* Write the current PID to the given file; a NULL or empty path means
 * "no pidfile wanted".  Failure to open the file is only a warning. */
static void write_pidfile(const char *pidfile)
{
	FILE *fp;

	if (pidfile == NULL || *pidfile == '\0')
		return;

	fp = fopen(pidfile, "w");
	if (fp != NULL) {
		fprintf(fp, "%d\n", (int)getpid());
		fclose(fp);
	} else {
		logmsg(LOG_WARNING, "can't open pidfile %s for writing", pidfile);
	}
}
982 void vpnc_doit(struct sa_block *s)
984 struct sigaction act;
985 struct encap_method meth;
987 const char *pidfile = config[CONFIG_PID_FILE];
989 switch (s->ipsec.encap_mode) {
990 case IPSEC_ENCAP_TUNNEL:
991 encap_esp_new(&meth);
992 gcry_create_nonce(&s->ipsec.ip_id, sizeof(uint16_t));
993 break;
994 case IPSEC_ENCAP_UDP_TUNNEL:
995 case IPSEC_ENCAP_UDP_TUNNEL_OLD:
996 encap_udp_new(&meth);
997 break;
998 default:
999 abort();
1001 s->ipsec.em = &meth;
1003 s->ipsec.rx.key_cry = s->ipsec.rx.key;
1004 hex_dump("rx.key_cry", s->ipsec.rx.key_cry, s->ipsec.key_len, NULL);
1006 s->ipsec.rx.key_md = s->ipsec.rx.key + s->ipsec.key_len;
1007 hex_dump("rx.key_md", s->ipsec.rx.key_md, s->ipsec.md_len, NULL);
1009 if (s->ipsec.cry_algo) {
1010 gcry_cipher_open(&s->ipsec.rx.cry_ctx, s->ipsec.cry_algo, GCRY_CIPHER_MODE_CBC, 0);
1011 gcry_cipher_setkey(s->ipsec.rx.cry_ctx, s->ipsec.rx.key_cry, s->ipsec.key_len);
1012 } else {
1013 s->ipsec.rx.cry_ctx = NULL;
1016 s->ipsec.tx.key_cry = s->ipsec.tx.key;
1017 hex_dump("tx.key_cry", s->ipsec.tx.key_cry, s->ipsec.key_len, NULL);
1019 s->ipsec.tx.key_md = s->ipsec.tx.key + s->ipsec.key_len;
1020 hex_dump("tx.key_md", s->ipsec.tx.key_md, s->ipsec.md_len, NULL);
1022 if (s->ipsec.cry_algo) {
1023 gcry_cipher_open(&s->ipsec.tx.cry_ctx, s->ipsec.cry_algo, GCRY_CIPHER_MODE_CBC, 0);
1024 gcry_cipher_setkey(s->ipsec.tx.cry_ctx, s->ipsec.tx.key_cry, s->ipsec.key_len);
1025 } else {
1026 s->ipsec.tx.cry_ctx = NULL;
1029 DEBUG(2, printf("remote -> local spi: %#08x\n", ntohl(s->ipsec.rx.spi)));
1030 DEBUG(2, printf("local -> remote spi: %#08x\n", ntohl(s->ipsec.tx.spi)));
1032 do_kill = 0;
1034 sigaction(SIGHUP, NULL, &act);
1035 if (act.sa_handler == SIG_DFL)
1036 signal(SIGHUP, killit);
1038 signal(SIGINT, killit);
1039 signal(SIGTERM, killit);
1041 chdir("/");
1043 if (!opt_nd) {
1044 pid_t pid;
1045 if ((pid = fork()) < 0) {
1046 fprintf(stderr, "Warning, could not fork the child process!\n");
1047 } else if (pid == 0) {
1048 close(0); open("/dev/null", O_RDONLY, 0666);
1049 close(1); open("/dev/null", O_WRONLY, 0666);
1050 close(2); open("/dev/null", O_WRONLY, 0666);
1051 setsid();
1052 } else {
1053 printf("VPNC started in background (pid: %d)...\n", (int)pid);
1055 * Use _exit(), since exit() will call the handler
1056 * registered with atexit() that will remove the
1057 * route path to concentrator.
1059 _exit(0);
1061 openlog("vpnc", LOG_PID | LOG_PERROR, LOG_DAEMON);
1062 logmsg = syslog;
1063 } else {
1064 printf("VPNC started in foreground...\n");
1066 write_pidfile(pidfile);
1068 vpnc_main_loop(s);
1070 if (pidfile)
1071 unlink(pidfile); /* ignore errors */