1 /* IPSec ESP and AH support.
2 Copyright (c) 1999 Pierre Beyssac
3 Copyright (C) 2002 Geoffrey Keating
4 Copyright (C) 2003-2007 Maurice Massar
5 Copyright (C) 2004 Tomas Mraz
6 Copyright (C) 2005 Michael Tilstra
7 Copyright (C) 2006 Daniel Roethlisberger
8 Copyright (C) 2007 Paolo Zarpellon (tap+Cygwin support)
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* borrowed from pipsecd (-; */

/*-
 * Copyright (c) 1999 Pierre Beyssac
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
56 #include <sys/types.h>
57 #include <sys/socket.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
67 #include <netinet/ip_icmp.h>
69 #include <arpa/inet.h>
75 #include <sys/select.h>
82 #if !defined(__sun__) && !defined(__SKYOS__)
94 #define MAX(a,b) ((a)>(b)?(a):(b))
98 #define FD_COPY(f, t) ((void)memcpy((t), (f), sizeof(*(f))))
/* A real ESP header (RFC 2406).  Only the two fixed fields are declared;
 * the variable-length IV, payload, padding and auth data follow in the
 * packet buffer.  Packed so the struct maps the wire format exactly. */
typedef struct esp_encap_header {
	uint32_t spi;		/* security parameters index */
	uint32_t seq_id;	/* sequence id (unimplemented) */
	/* variable-length payload data + padding */
	/* unsigned char next_header */
	/* optional auth data */
} __attribute__((packed)) esp_encap_header_t;
/* Dispatch table for one encapsulation flavor (raw IP/ESP or UDP),
 * selected at startup by encap_esp_new()/encap_udp_new(). */
struct encap_method {
	int fixed_header_size;	/* size of the fixed encap header before the IV */

	/* Read one raw packet from the ESP socket into buf; fills s->ipsec.rx. */
	int (*recv) (struct sa_block *s, unsigned char *buf, unsigned int bufsize);
	/* Encapsulate buf (with MAX_HEADER headroom) and transmit it to the peer. */
	void (*send_peer) (struct sa_block *s, unsigned char *buf, unsigned int bufsize);
	/* Authenticate/decrypt the packet previously stored in s->ipsec.rx. */
	int (*recv_peer) (struct sa_block *s);
};
/* Yuck! Global variables... */

#define MAX_HEADER 72		/* headroom reserved for encapsulation headers */
#define MAX_PACKET 4096		/* maximum tunneled payload size */
/* Set (to the signal number) by the signal handler; polled by the main loop. */
int volatile do_kill;
/* Shared packet buffers: MAX_HEADER bytes of headroom for encapsulation,
 * plus ETH_HLEN so an ethernet header can be prepended in tap mode. */
static uint8_t global_buffer_rx[MAX_HEADER + MAX_PACKET + ETH_HLEN];
static uint8_t global_buffer_tx[MAX_HEADER + MAX_PACKET + ETH_HLEN];
/*
 * Checksum routine for Internet Protocol family headers (C Version).
 *
 * Standard one's-complement internet checksum (RFC 1071): "addr" points
 * to the data, "len" is its length in bytes.  Returns the 16-bit
 * complement of the one's-complement sum, suitable for storing directly
 * into an IP header checksum field.
 *
 * Note: uses fixed-width uint16_t instead of the non-portable BSD
 * u_short so the routine builds under strict ISO C as well.
 */
static uint16_t in_cksum(uint16_t *addr, int len)
{
	int nleft = len;
	uint16_t *w = addr;
	int sum = 0;
	uint16_t answer = 0;

	/*
	 * Our algorithm is simple, using a 32 bit accumulator (sum), we add
	 * sequential 16 bit words to it, and at the end, fold back all the
	 * carry bits from the top 16 bits into the lower 16 bits.
	 */
	while (nleft > 1) {
		sum += *w++;
		nleft -= 2;
	}

	/* mop up an odd byte, if necessary */
	if (nleft == 1) {
		*(unsigned char *) (&answer) = *(unsigned char *) w;
		sum += answer;
	}

	/* add back carry outs from top 16 bits to low 16 bits */
	sum = (sum >> 16) + (sum & 0xffff);	/* add hi 16 to low 16 */
	sum += (sum >> 16);	/* add carry */
	answer = ~sum;		/* truncate to 16 bits */
	return answer;
}
161 * Decapsulate from a raw IP packet
163 static int encap_rawip_recv(struct sa_block
*s
, unsigned char *buf
, unsigned int bufsize
)
166 struct ip
*p
= (struct ip
*)buf
;
167 struct sockaddr_in from
;
168 socklen_t fromlen
= sizeof(from
);
170 r
= recvfrom(s
->esp_fd
, buf
, bufsize
, 0, (struct sockaddr
*)&from
, &fromlen
);
172 syslog(LOG_ERR
, "recvfrom: %m");
175 if (from
.sin_addr
.s_addr
!= s
->dst
.s_addr
) {
176 syslog(LOG_ALERT
, "packet from unknown host %s", inet_ntoa(from
.sin_addr
));
179 if (r
< (p
->ip_hl
<< 2) + s
->ipsec
.em
->fixed_header_size
) {
180 syslog(LOG_ALERT
, "packet too short. got %zd, expected %d", r
, (p
->ip_hl
<< 2) + s
->ipsec
.em
->fixed_header_size
);
184 #ifdef NEED_IPLEN_FIX
187 p
->ip_len
= ntohs(r
);
190 s
->ipsec
.rx
.buf
= buf
;
191 s
->ipsec
.rx
.buflen
= r
;
192 s
->ipsec
.rx
.bufpayload
= (p
->ip_hl
<< 2);
193 s
->ipsec
.rx
.bufsize
= bufsize
;
198 * Decapsulate from an UDP packet
200 static int encap_udp_recv(struct sa_block
*s
, unsigned char *buf
, unsigned int bufsize
)
204 r
= recv(s
->esp_fd
, buf
, bufsize
, 0);
206 syslog(LOG_ERR
, "recvfrom: %m");
209 if (s
->ipsec
.natt_active_mode
== NATT_ACTIVE_DRAFT_OLD
&& r
> 8) {
211 memmove(buf
, buf
+ 8, r
);
213 if( r
== 1 && *buf
== 0xff )
215 DEBUGTOP(1, printf("UDP NAT keepalive packet received\n"));
218 if (r
< s
->ipsec
.em
->fixed_header_size
) {
219 syslog(LOG_ALERT
, "packet too short from %s. got %zd, expected %d",
220 inet_ntoa(s
->dst
), r
, s
->ipsec
.em
->fixed_header_size
);
224 s
->ipsec
.rx
.buf
= buf
;
225 s
->ipsec
.rx
.buflen
= r
;
226 s
->ipsec
.rx
.bufpayload
= 0;
227 s
->ipsec
.rx
.bufsize
= bufsize
;
234 static int encap_any_decap(struct sa_block
*s
)
236 s
->ipsec
.rx
.buflen
-= s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
;
237 s
->ipsec
.rx
.buf
+= s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
;
238 if (s
->ipsec
.rx
.buflen
== 0)
244 * Send decapsulated packet to tunnel device
246 static int tun_send_ip(struct sa_block
*s
)
251 start
= s
->ipsec
.rx
.buf
;
252 len
= s
->ipsec
.rx
.buflen
;
254 if (opt_if_mode
== IF_MODE_TAP
) {
257 * Add ethernet header before s->ipsec.rx.buf where
258 * at least ETH_HLEN bytes should be available.
260 struct ether_header
*eth_hdr
= (struct ether_header
*) (s
->ipsec
.rx
.buf
- ETH_HLEN
);
262 memcpy(eth_hdr
->ether_dhost
, s
->tun_hwaddr
, ETH_ALEN
);
263 memcpy(eth_hdr
->ether_shost
, s
->tun_hwaddr
, ETH_ALEN
);
265 /* Use a different MAC as source */
266 eth_hdr
->ether_shost
[0] ^= 0x80; /* toggle some visible bit */
267 eth_hdr
->ether_type
= htons(ETHERTYPE_IP
);
269 start
= (uint8_t *) eth_hdr
;
274 sent
= tun_write(s
->tun_fd
, start
, len
);
276 syslog(LOG_ERR
, "truncated in: %d -> %d\n", len
, sent
);
277 hex_dump("Tx pkt", start
, len
, NULL
);
282 * Compute HMAC for an arbitrary stream of bytes
284 static int hmac_compute(int md_algo
,
285 const unsigned char *data
, unsigned int data_size
,
286 unsigned char *digest
, unsigned char do_store
,
287 const unsigned char *secret
, unsigned short secret_size
)
291 unsigned char *hmac_digest
;
292 unsigned int hmac_len
;
295 gcry_md_open(&md_ctx
, md_algo
, GCRY_MD_FLAG_HMAC
);
296 assert(md_ctx
!= NULL
);
297 ret
= gcry_md_setkey(md_ctx
, secret
, secret_size
);
299 gcry_md_write(md_ctx
, data
, data_size
);
300 gcry_md_final(md_ctx
);
301 hmac_digest
= gcry_md_read(md_ctx
, 0);
302 hmac_len
= 12; /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
305 memcpy(digest
, hmac_digest
, hmac_len
);
308 ret
= memcmp(digest
, hmac_digest
, hmac_len
);
310 gcry_md_close(md_ctx
);
315 * Encapsulate a packet in ESP
317 static void encap_esp_encapsulate(struct sa_block
*s
)
319 esp_encap_header_t
*eh
;
320 unsigned char *iv
, *cleartext
;
321 size_t i
, padding
, pad_blksz
;
322 unsigned int cleartextlen
;
325 * Add padding as necessary
327 * done: this should be checked, RFC 2406 section 2.4 is quite
328 * obscure on that point.
331 pad_blksz
= s
->ipsec
.blk_len
;
332 while (pad_blksz
& 3) /* must be multiple of 4 */
334 padding
= pad_blksz
- ((s
->ipsec
.tx
.buflen
+ 2 - s
->ipsec
.tx
.var_header_size
- s
->ipsec
.tx
.bufpayload
) % pad_blksz
);
335 DEBUG(3, printf("sending packet: len = %d, padding = %lu\n", s
->ipsec
.tx
.buflen
, (unsigned long)padding
));
336 if (padding
== pad_blksz
)
339 for (i
= 1; i
<= padding
; i
++) {
340 s
->ipsec
.tx
.buf
[s
->ipsec
.tx
.buflen
] = i
;
341 s
->ipsec
.tx
.buflen
++;
344 /* Add trailing padlen and next_header */
345 s
->ipsec
.tx
.buf
[s
->ipsec
.tx
.buflen
++] = padding
;
346 s
->ipsec
.tx
.buf
[s
->ipsec
.tx
.buflen
++] = IPPROTO_IPIP
;
348 cleartext
= s
->ipsec
.tx
.buf
+ s
->ipsec
.tx
.var_header_size
+ s
->ipsec
.tx
.bufpayload
;
349 cleartextlen
= s
->ipsec
.tx
.buflen
- s
->ipsec
.tx
.var_header_size
- s
->ipsec
.tx
.bufpayload
;
351 eh
= (esp_encap_header_t
*) (s
->ipsec
.tx
.buf
+ s
->ipsec
.tx
.bufpayload
);
352 eh
->spi
= s
->ipsec
.tx
.spi
;
353 eh
->seq_id
= htonl(s
->ipsec
.tx
.seq_id
++);
355 /* Copy initialization vector in packet */
356 iv
= (unsigned char *)(eh
+ 1);
357 gcry_create_nonce(iv
, s
->ipsec
.iv_len
);
358 hex_dump("iv", iv
, s
->ipsec
.iv_len
, NULL
);
360 hex_dump("sending ESP packet (before crypt)", s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, NULL
);
362 if (s
->ipsec
.cry_algo
) {
363 gcry_cipher_setiv(s
->ipsec
.tx
.cry_ctx
, iv
, s
->ipsec
.iv_len
);
364 gcry_cipher_encrypt(s
->ipsec
.tx
.cry_ctx
, cleartext
, cleartextlen
, NULL
, 0);
367 hex_dump("sending ESP packet (after crypt)", s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, NULL
);
369 /* Handle optional authentication field */
370 if (s
->ipsec
.md_algo
) {
371 hmac_compute(s
->ipsec
.md_algo
,
372 s
->ipsec
.tx
.buf
+ s
->ipsec
.tx
.bufpayload
,
373 s
->ipsec
.tx
.var_header_size
+ cleartextlen
,
374 s
->ipsec
.tx
.buf
+ s
->ipsec
.tx
.bufpayload
375 + s
->ipsec
.tx
.var_header_size
+ cleartextlen
,
376 1, s
->ipsec
.tx
.key_md
, s
->ipsec
.md_len
);
377 s
->ipsec
.tx
.buflen
+= 12; /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
378 hex_dump("sending ESP packet (after ah)", s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, NULL
);
383 * Encapsulate a packet in IP ESP and send to the peer.
384 * "buf" should have exactly MAX_HEADER free bytes at its beginning
385 * to account for encapsulation data (not counted in "size").
387 static void encap_esp_send_peer(struct sa_block
*s
, unsigned char *buf
, unsigned int bufsize
)
391 struct sockaddr_in dstaddr
;
395 /* Keep a pointer to the old IP header */
396 tip
= (struct ip
*)buf
;
398 s
->ipsec
.tx
.buf
= buf
;
399 s
->ipsec
.tx
.buflen
= bufsize
;
401 /* Prepend our encapsulation header and new IP header */
402 s
->ipsec
.tx
.var_header_size
= (s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.iv_len
);
404 s
->ipsec
.tx
.buf
-= sizeof(struct ip
) + s
->ipsec
.tx
.var_header_size
;
405 s
->ipsec
.tx
.buflen
+= sizeof(struct ip
) + s
->ipsec
.tx
.var_header_size
;
407 s
->ipsec
.tx
.bufpayload
= sizeof(struct ip
);
409 /* Fill non-mutable fields */
412 /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
413 ip
.ip_id
= htons(s
->ipsec
.ip_id
++);
414 ip
.ip_p
= IPPROTO_ESP
;
418 /* Fill mutable fields */
419 ip
.ip_tos
= (bufsize
< sizeof(struct ip
)) ? 0 : tip
->ip_tos
;
421 ip
.ip_ttl
= IPDEFTTL
;
424 encap_esp_encapsulate(s
);
426 ip
.ip_len
= s
->ipsec
.tx
.buflen
;
427 #ifdef NEED_IPLEN_FIX
428 ip
.ip_len
= htons(ip
.ip_len
);
430 ip
.ip_sum
= in_cksum((u_short
*) s
->ipsec
.tx
.buf
, sizeof(struct ip
));
432 memcpy(s
->ipsec
.tx
.buf
, &ip
, sizeof ip
);
434 dstaddr
.sin_family
= AF_INET
;
435 dstaddr
.sin_addr
= s
->dst
;
436 dstaddr
.sin_port
= 0;
437 sent
= sendto(s
->esp_fd
, s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, 0, (struct sockaddr
*)&dstaddr
, sizeof(struct sockaddr_in
));
439 syslog(LOG_ERR
, "esp sendto: %m");
442 if (sent
!= s
->ipsec
.tx
.buflen
)
443 syslog(LOG_ALERT
, "esp truncated out (%lld out of %d)", (long long)sent
, s
->ipsec
.tx
.buflen
);
447 * Encapsulate a packet in UDP ESP and send to the peer.
448 * "buf" should have exactly MAX_HEADER free bytes at its beginning
449 * to account for encapsulation data (not counted in "size").
451 static void encap_udp_send_peer(struct sa_block
*s
, unsigned char *buf
, unsigned int bufsize
)
457 s
->ipsec
.tx
.buf
= buf
;
458 s
->ipsec
.tx
.buflen
= bufsize
;
460 /* Prepend our encapsulation header and new IP header */
461 s
->ipsec
.tx
.var_header_size
= (s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.iv_len
);
463 s
->ipsec
.tx
.buf
-= s
->ipsec
.tx
.var_header_size
;
464 s
->ipsec
.tx
.buflen
+= s
->ipsec
.tx
.var_header_size
;
466 s
->ipsec
.tx
.bufpayload
= 0;
468 encap_esp_encapsulate(s
);
470 if (s
->ipsec
.natt_active_mode
== NATT_ACTIVE_DRAFT_OLD
) {
471 s
->ipsec
.tx
.buf
-= 8;
472 s
->ipsec
.tx
.buflen
+= 8;
473 memset(s
->ipsec
.tx
.buf
, 0, 8);
476 sent
= send(s
->esp_fd
, s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, 0);
478 syslog(LOG_ERR
, "udp sendto: %m");
481 if (sent
!= s
->ipsec
.tx
.buflen
)
482 syslog(LOG_ALERT
, "udp truncated out (%lld out of %d)",
483 (long long)sent
, s
->ipsec
.tx
.buflen
);
486 static int encap_esp_recv_peer(struct sa_block
*s
)
490 unsigned char padlen
, next_header
;
493 struct esp_encap_header
*eh
;
495 eh
= (struct esp_encap_header
*)(s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
);
496 s
->ipsec
.rx
.var_header_size
= s
->ipsec
.iv_len
;
497 iv
= s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
;
499 len
= s
->ipsec
.rx
.buflen
- s
->ipsec
.rx
.bufpayload
- s
->ipsec
.em
->fixed_header_size
- s
->ipsec
.rx
.var_header_size
;
502 syslog(LOG_ALERT
, "Packet too short");
506 /* Handle optional authentication field */
507 if (s
->ipsec
.md_algo
) {
508 len
-= 12; /*gcry_md_get_algo_dlen(peer->local_sa->md_algo); */
509 s
->ipsec
.rx
.buflen
-= 12;
510 if (hmac_compute(s
->ipsec
.md_algo
,
511 s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
,
512 s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
,
513 s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
514 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
,
517 s
->ipsec
.md_len
) != 0) {
518 syslog(LOG_ALERT
, "HMAC mismatch in ESP mode");
523 blksz
= s
->ipsec
.blk_len
;
524 if (s
->ipsec
.cry_algo
&& ((len
% blksz
) != 0)) {
526 "payload len %d not a multiple of algorithm block size %lu", len
,
527 (unsigned long)blksz
);
531 hex_dump("receiving ESP packet (before decrypt)",
532 &s
->ipsec
.rx
.buf
[s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
+
533 s
->ipsec
.rx
.var_header_size
], len
, NULL
);
535 if (s
->ipsec
.cry_algo
) {
538 data
= (s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
539 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
);
540 gcry_cipher_setiv(s
->ipsec
.rx
.cry_ctx
, iv
, s
->ipsec
.iv_len
);
541 gcry_cipher_decrypt(s
->ipsec
.rx
.cry_ctx
, data
, len
, NULL
, 0);
544 hex_dump("receiving ESP packet (after decrypt)",
545 &s
->ipsec
.rx
.buf
[s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
+
546 s
->ipsec
.rx
.var_header_size
], len
, NULL
);
548 padlen
= s
->ipsec
.rx
.buf
[s
->ipsec
.rx
.bufpayload
549 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
- 2];
550 next_header
= s
->ipsec
.rx
.buf
[s
->ipsec
.rx
.bufpayload
551 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
- 1];
553 if (padlen
+ 2 > len
) {
554 syslog(LOG_ALERT
, "Inconsistent padlen");
557 if (next_header
!= IPPROTO_IPIP
) {
558 syslog(LOG_ALERT
, "Inconsistent next_header %d", next_header
);
561 DEBUG(3, printf("pad len: %d, next_header: %d\n", padlen
, next_header
));
564 s
->ipsec
.rx
.buflen
-= padlen
+ 2;
567 pad
= s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
568 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
;
569 for (i
= 1; i
<= padlen
; i
++) {
571 syslog(LOG_ALERT
, "Bad padding");
/* Initialize the encapsulation method table for raw ESP over IP. */
static void encap_esp_new(struct encap_method *encap)
{
	encap->recv = encap_rawip_recv;
	encap->send_peer = encap_esp_send_peer;
	encap->recv_peer = encap_esp_recv_peer;
	encap->fixed_header_size = sizeof(esp_encap_header_t);
}
/* Initialize the encapsulation method table for UDP-encapsulated ESP.
 * Reception and transmission differ from raw ESP; the decapsulation
 * step (recv_peer) is shared with the raw ESP method. */
static void encap_udp_new(struct encap_method *encap)
{
	encap->recv = encap_udp_recv;
	encap->send_peer = encap_udp_send_peer;
	encap->recv_peer = encap_esp_recv_peer;
	encap->fixed_header_size = sizeof(esp_encap_header_t);
}
598 * Return 1 if packet has been processed, 0 otherwise
600 static int process_arp(struct sa_block
*s
, uint8_t *frame
)
605 struct ether_header
*eth
= (struct ether_header
*) frame
;
606 struct ether_arp
*arp
= (struct ether_arp
*) (frame
+ ETH_HLEN
);
608 if (ntohs(eth
->ether_type
) != ETHERTYPE_ARP
) {
612 if (ntohs(arp
->arp_hrd
) != ARPHRD_ETHER
||
613 ntohs(arp
->arp_pro
) != 0x800 ||
614 arp
->arp_hln
!= ETH_ALEN
||
616 ntohs(arp
->arp_op
) != ARPOP_REQUEST
||
617 !memcmp(arp
->arp_spa
, arp
->arp_tpa
, 4) ||
618 memcmp(eth
->ether_shost
, s
->tun_hwaddr
, ETH_ALEN
) ||
619 !memcmp(arp
->arp_tpa
, &s
->our_address
, 4)) {
620 /* whatever .. just drop it */
626 memcpy(eth
->ether_dhost
, s
->tun_hwaddr
, ETH_ALEN
);
627 eth
->ether_shost
[0] ^= 0x80; /* Use a different MAC as source */
629 memcpy(tmp
, arp
->arp_spa
, 4);
630 memcpy(arp
->arp_spa
, arp
->arp_tpa
, 4);
631 memcpy(arp
->arp_tpa
, tmp
, 4);
633 memcpy(arp
->arp_tha
, s
->tun_hwaddr
, ETH_ALEN
);
634 arp
->arp_sha
[0] ^= 0x80; /* Use a different MAC as source */
636 arp
->arp_op
= htons(ARPOP_REPLY
);
638 frame_size
= ETH_HLEN
+ sizeof(struct ether_arp
);
639 tun_write(s
->tun_fd
, frame
, frame_size
);
640 hex_dump("ARP reply", frame
, frame_size
, NULL
);
651 * Process non-IP packets
652 * Return 1 if packet has been processed, 0 otherwise
654 static int process_non_ip(struct sa_block
*s
, uint8_t *frame
)
656 struct ether_header
*eth
= (struct ether_header
*) frame
;
658 s
= NULL
; /* unused */
660 if (ntohs(eth
->ether_type
) != ETHERTYPE_IP
) {
661 /* drop non-ip traffic */
668 static void process_tun(struct sa_block
*s
)
671 int size
= MAX_PACKET
;
672 uint8_t *start
= global_buffer_rx
+ MAX_HEADER
;
674 if (opt_if_mode
== IF_MODE_TAP
) {
675 /* Make sure IP packet starts at buf + MAX_HEADER */
680 /* Receive a packet from the tunnel interface */
681 pack
= tun_read(s
->tun_fd
, start
, size
);
683 hex_dump("Rx pkt", start
, pack
, NULL
);
685 if (opt_if_mode
== IF_MODE_TAP
) {
686 if (process_arp(s
, start
)) {
689 if (process_non_ip(s
, start
)) {
696 syslog(LOG_ERR
, "read: %m");
700 /* Don't access the contents of the buffer other than byte aligned.
701 * 12: Offset of ip source address in ip header,
702 * 4: Length of IP address */
703 if (!memcmp(global_buffer_rx
+ MAX_HEADER
+ 12, &s
->dst
.s_addr
, 4)) {
704 syslog(LOG_ALERT
, "routing loop to %s",
709 /* Encapsulate and send to the other end of the tunnel */
710 s
->ipsec
.life
.tx
+= pack
;
711 s
->ipsec
.em
->send_peer(s
, global_buffer_rx
, pack
);
714 static void process_socket(struct sa_block
*s
)
716 /* Receive a packet from a socket */
718 uint8_t *start
= global_buffer_tx
;
719 esp_encap_header_t
*eh
;
721 if (opt_if_mode
== IF_MODE_TAP
) {
725 pack
= s
->ipsec
.em
->recv(s
, start
, MAX_HEADER
+ MAX_PACKET
);
729 eh
= (esp_encap_header_t
*) (s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
);
731 process_late_ike(s
, s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
+ 4 /* SPI-size */,
732 s
->ipsec
.rx
.buflen
- s
->ipsec
.rx
.bufpayload
- 4);
734 } else if (eh
->spi
!= s
->ipsec
.rx
.spi
) {
735 syslog(LOG_NOTICE
, "unknown spi %#08x from peer", ntohl(eh
->spi
));
739 /* Check auth digest and/or decrypt */
740 if (s
->ipsec
.em
->recv_peer(s
) != 0)
743 if (encap_any_decap(s
) == 0) {
744 syslog(LOG_DEBUG
, "received update probe from peer");
746 /* Send the decapsulated packet to the tunnel interface */
747 s
->ipsec
.life
.rx
+= s
->ipsec
.rx
.buflen
;
752 #if defined(__CYGWIN__)
753 static void *tun_thread (void *arg
)
755 struct sa_block
*s
= (struct sa_block
*) arg
;
764 static void vpnc_main_loop(struct sa_block
*s
)
768 int enable_keepalives
;
771 struct timeval select_timeout
;
772 struct timeval normal_timeout
;
773 time_t next_ike_keepalive
=0;
774 time_t next_ike_dpd
=0;
775 #if defined(__CYGWIN__)
779 /* non-esp marker, nat keepalive payload (0xFF) */
780 uint8_t keepalive_v2
[5] = { 0x00, 0x00, 0x00, 0x00, 0xFF };
781 uint8_t keepalive_v1
[1] = { 0xFF };
783 size_t keepalive_size
;
785 if (s
->ipsec
.natt_active_mode
== NATT_ACTIVE_DRAFT_OLD
) {
786 keepalive
= keepalive_v1
;
787 keepalive_size
= sizeof(keepalive_v1
);
788 } else { /* active_mode is either RFC or CISCO_UDP */
789 keepalive
= keepalive_v2
;
790 keepalive_size
= sizeof(keepalive_v2
);
793 /* send keepalives if UDP encapsulation is enabled */
794 enable_keepalives
= (s
->ipsec
.encap_mode
!= IPSEC_ENCAP_TUNNEL
);
796 /* regular wakeups if keepalives on ike or dpd active */
797 timed_mode
= ((enable_keepalives
&& s
->ike_fd
!= s
->esp_fd
) || s
->ike
.do_dpd
);
801 #if !defined(__CYGWIN__)
802 FD_SET(s
->tun_fd
, &rfds
);
803 nfds
= MAX(nfds
, s
->tun_fd
+1);
806 FD_SET(s
->esp_fd
, &rfds
);
807 nfds
= MAX(nfds
, s
->esp_fd
+1);
809 if (s
->ike_fd
!= s
->esp_fd
) {
810 FD_SET(s
->ike_fd
, &rfds
);
811 nfds
= MAX(nfds
, s
->ike_fd
+1);
814 #if defined(__CYGWIN__)
815 if (pthread_create(&tid
, NULL
, tun_thread
, s
)) {
816 syslog(LOG_ERR
, "Cannot create tun thread!\n");
821 normal_timeout
.tv_sec
= 86400;
822 normal_timeout
.tv_usec
= 0;
825 /* send initial dpd request */
826 next_ike_dpd
= time(NULL
) + s
->ike
.dpd_idle
;
828 normal_timeout
.tv_sec
= s
->ike
.dpd_idle
;
829 normal_timeout
.tv_usec
= 0;
832 if (enable_keepalives
) {
833 normal_timeout
.tv_sec
= 9;
834 normal_timeout
.tv_usec
= 500000;
836 if (s
->ike_fd
!= s
->esp_fd
) {
837 /* send initial nat ike keepalive packet */
838 next_ike_keepalive
= time(NULL
) + 9;
843 select_timeout
= normal_timeout
;
849 struct timeval
*tvp
= NULL
;
850 FD_COPY(&rfds
, &refds
);
851 if (s
->ike
.do_dpd
|| enable_keepalives
)
852 tvp
= &select_timeout
;
853 presult
= select(nfds
, &refds
, NULL
, NULL
, tvp
);
854 if (presult
== 0 && (s
->ike
.do_dpd
|| enable_keepalives
)) {
855 /* reset to max timeout */
856 select_timeout
= normal_timeout
;
857 if (enable_keepalives
) {
858 if (s
->ike_fd
!= s
->esp_fd
) {
859 /* send nat ike keepalive packet */
860 next_ike_keepalive
= time(NULL
) + 9;
863 /* send nat keepalive packet */
864 if (send(s
->esp_fd
, keepalive
, keepalive_size
, 0) == -1) {
865 syslog(LOG_ERR
, "keepalive sendto: %m");
869 time_t now
= time(NULL
);
870 if (s
->ike
.dpd_seqno
!= s
->ike
.dpd_seqno_ack
) {
871 /* Wake up more often for dpd attempts */
872 select_timeout
.tv_sec
= 5;
873 select_timeout
.tv_usec
= 0;
875 next_ike_dpd
= now
+ s
->ike
.dpd_idle
;
877 else if (now
>= next_ike_dpd
) {
879 next_ike_dpd
= now
+ s
->ike
.dpd_idle
;
883 DEBUG(2,printf("lifetime status: %ld of %u seconds used, %u|%u of %u kbytes used\n",
884 time(NULL
) - s
->ipsec
.life
.start
,
885 s
->ipsec
.life
.seconds
,
886 s
->ipsec
.life
.rx
/1024,
887 s
->ipsec
.life
.tx
/1024,
888 s
->ipsec
.life
.kbytes
));
889 } while ((presult
== 0 || (presult
== -1 && errno
== EINTR
)) && !do_kill
);
891 syslog(LOG_ERR
, "select: %m");
895 #if !defined(__CYGWIN__)
896 if (FD_ISSET(s
->tun_fd
, &refds
)) {
901 if (FD_ISSET(s
->esp_fd
, &refds
) ) {
905 if (s
->ike_fd
!= s
->esp_fd
&& FD_ISSET(s
->ike_fd
, &refds
) ) {
906 DEBUG(3,printf("received something on ike fd..\n"));
907 len
= recv(s
->ike_fd
, global_buffer_tx
, MAX_HEADER
+ MAX_PACKET
, 0);
908 process_late_ike(s
, global_buffer_tx
, len
);
912 time_t now
= time(NULL
);
913 time_t next_up
= now
+ 86400;
914 if (enable_keepalives
) {
915 /* never wait more than 9 seconds for a UDP keepalive */
917 if (s
->ike_fd
!= s
->esp_fd
) {
918 if (now
>= next_ike_keepalive
) {
919 /* send nat ike keepalive packet now */
920 next_ike_keepalive
= now
+ 9;
922 select_timeout
= normal_timeout
;
924 if (next_ike_keepalive
< next_up
)
925 next_up
= next_ike_keepalive
;
929 if (s
->ike
.dpd_seqno
!= s
->ike
.dpd_seqno_ack
) {
931 next_ike_dpd
= now
+ s
->ike
.dpd_idle
;
932 if (now
+ 5 < next_up
)
935 else if (now
>= next_ike_dpd
) {
937 next_ike_dpd
= now
+ s
->ike
.dpd_idle
;
939 if (next_ike_dpd
< next_up
)
940 next_up
= next_ike_dpd
;
942 /* Reduce timeout so next activity happens on schedule */
943 select_timeout
.tv_sec
= next_up
- now
;
944 select_timeout
.tv_usec
= 0;
951 syslog(LOG_NOTICE
, "connection terminated by dead peer detection");
954 syslog(LOG_NOTICE
, "connection terminated by peer");
957 syslog(LOG_NOTICE
, "terminated by signal: %d", do_kill
);
962 static void killit(int signum
)
/*
 * Write our process id to the given pidfile.
 * A NULL or empty path disables pidfile creation; a failure to open the
 * file is logged as a warning and otherwise ignored.
 */
static void write_pidfile(const char *pidfile)
{
	FILE *pf;

	if (pidfile == NULL || pidfile[0] == '\0')
		return;

	pf = fopen(pidfile, "w");
	if (pf == NULL) {
		syslog(LOG_WARNING, "can't open pidfile %s for writing", pidfile);
		return;
	}

	fprintf(pf, "%d\n", (int)getpid());
	fclose(pf);
}
984 void vpnc_doit(struct sa_block
*s
)
986 struct sigaction act
;
987 struct encap_method meth
;
989 const char *pidfile
= config
[CONFIG_PID_FILE
];
991 switch (s
->ipsec
.encap_mode
) {
992 case IPSEC_ENCAP_TUNNEL
:
993 encap_esp_new(&meth
);
994 gcry_create_nonce(&s
->ipsec
.ip_id
, sizeof(uint16_t));
996 case IPSEC_ENCAP_UDP_TUNNEL
:
997 case IPSEC_ENCAP_UDP_TUNNEL_OLD
:
998 encap_udp_new(&meth
);
1003 s
->ipsec
.em
= &meth
;
1005 s
->ipsec
.rx
.key_cry
= s
->ipsec
.rx
.key
;
1006 hex_dump("rx.key_cry", s
->ipsec
.rx
.key_cry
, s
->ipsec
.key_len
, NULL
);
1008 s
->ipsec
.rx
.key_md
= s
->ipsec
.rx
.key
+ s
->ipsec
.key_len
;
1009 hex_dump("rx.key_md", s
->ipsec
.rx
.key_md
, s
->ipsec
.md_len
, NULL
);
1011 if (s
->ipsec
.cry_algo
) {
1012 gcry_cipher_open(&s
->ipsec
.rx
.cry_ctx
, s
->ipsec
.cry_algo
, GCRY_CIPHER_MODE_CBC
, 0);
1013 gcry_cipher_setkey(s
->ipsec
.rx
.cry_ctx
, s
->ipsec
.rx
.key_cry
, s
->ipsec
.key_len
);
1015 s
->ipsec
.rx
.cry_ctx
= NULL
;
1018 s
->ipsec
.tx
.key_cry
= s
->ipsec
.tx
.key
;
1019 hex_dump("tx.key_cry", s
->ipsec
.tx
.key_cry
, s
->ipsec
.key_len
, NULL
);
1021 s
->ipsec
.tx
.key_md
= s
->ipsec
.tx
.key
+ s
->ipsec
.key_len
;
1022 hex_dump("tx.key_md", s
->ipsec
.tx
.key_md
, s
->ipsec
.md_len
, NULL
);
1024 if (s
->ipsec
.cry_algo
) {
1025 gcry_cipher_open(&s
->ipsec
.tx
.cry_ctx
, s
->ipsec
.cry_algo
, GCRY_CIPHER_MODE_CBC
, 0);
1026 gcry_cipher_setkey(s
->ipsec
.tx
.cry_ctx
, s
->ipsec
.tx
.key_cry
, s
->ipsec
.key_len
);
1028 s
->ipsec
.tx
.cry_ctx
= NULL
;
1031 DEBUG(2, printf("remote -> local spi: %#08x\n", ntohl(s
->ipsec
.rx
.spi
)));
1032 DEBUG(2, printf("local -> remote spi: %#08x\n", ntohl(s
->ipsec
.tx
.spi
)));
1036 sigaction(SIGHUP
, NULL
, &act
);
1037 if (act
.sa_handler
== SIG_DFL
)
1038 signal(SIGHUP
, killit
);
1040 signal(SIGINT
, killit
);
1041 signal(SIGTERM
, killit
);
1047 if ((pid
= fork()) < 0) {
1048 fprintf(stderr
, "Warning, could not fork the child process!\n");
1049 } else if (pid
== 0) {
1050 close(0); open("/dev/null", O_RDONLY
, 0666);
1051 close(1); open("/dev/null", O_WRONLY
, 0666);
1052 close(2); open("/dev/null", O_WRONLY
, 0666);
1055 printf("VPNC started in background (pid: %d)...\n", (int)pid
);
1059 printf("VPNC started in foreground...\n");
1061 openlog("vpnc", LOG_PID
| LOG_PERROR
, LOG_DAEMON
);
1062 write_pidfile(pidfile
);
1067 unlink(pidfile
); /* ignore errors */