/*
 *  OpenVPN -- An application to securely tunnel IP networks
 *             over a single TCP/UDP port, with support for SSL/TLS-based
 *             session authentication and key exchange,
 *             packet encryption, packet authentication, and
 *             packet compression.
 *
 *  Copyright (C) 2002-2005 OpenVPN Solutions LLC <info@openvpn.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program (see the file COPYING included with this
 *  distribution); if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
26 #include "config-win32.h"
36 #include "forward-inline.h"
/*
 * Action codes for the TCP state machine.  These drive the
 * dispatch/post/action loop below; several of them (TA_UNDEF,
 * TA_TUN_READ, TA_INITIAL, TA_TIMEOUT) are referenced by the
 * functions further down, so the full set must be defined here.
 */
#define TA_UNDEF                 0
#define TA_SOCKET_READ           1
#define TA_SOCKET_READ_RESIDUAL  2
#define TA_SOCKET_WRITE          3
#define TA_SOCKET_WRITE_READY    4
#define TA_SOCKET_WRITE_DEFERRED 5
#define TA_TUN_READ              6
#define TA_TUN_WRITE             7
#define TA_INITIAL               8
#define TA_TIMEOUT               9
#define TA_TUN_WRITE_TIMEOUT     10
/*
 * Special tags passed to event.[ch] functions.
 * Any event argument >= MTCP_N is interpreted as a
 * struct multi_instance pointer rather than one of these tags.
 */
#define MTCP_SOCKET      ((void*)1)
#define MTCP_TUN         ((void*)2)
#define MTCP_SIG         ((void*)3) /* Only on Windows */
#ifdef ENABLE_MANAGEMENT
# define MTCP_MANAGEMENT ((void*)4)
#endif

#define MTCP_N           ((void*)16) /* upper bound on MTCP_x */
83 return "TA_SOCKET_READ";
84 case TA_SOCKET_READ_RESIDUAL
:
85 return "TA_SOCKET_READ_RESIDUAL";
87 return "TA_SOCKET_WRITE";
88 case TA_SOCKET_WRITE_READY
:
89 return "TA_SOCKET_WRITE_READY";
90 case TA_SOCKET_WRITE_DEFERRED
:
91 return "TA_SOCKET_WRITE_DEFERRED";
95 return "TA_TUN_WRITE";
100 case TA_TUN_WRITE_TIMEOUT
:
101 return "TA_TUN_WRITE_TIMEOUT";
107 static struct multi_instance
*
108 multi_create_instance_tcp (struct multi_context
*m
)
110 struct gc_arena gc
= gc_new ();
111 struct multi_instance
*mi
= NULL
;
112 struct hash
*hash
= m
->hash
;
114 mi
= multi_create_instance (m
, NULL
);
117 struct hash_element
*he
;
118 const uint32_t hv
= hash_value (hash
, &mi
->real
);
119 struct hash_bucket
*bucket
= hash_bucket (hash
, hv
);
121 hash_bucket_lock (bucket
);
122 he
= hash_lookup_fast (hash
, bucket
, &mi
->real
, hv
);
126 struct multi_instance
*oldmi
= (struct multi_instance
*) he
->value
;
127 msg (D_MULTI_LOW
, "MULTI TCP: new incoming client address matches existing client address -- new client takes precedence");
128 oldmi
->did_real_hash
= false;
129 multi_close_instance (m
, oldmi
, false);
134 hash_add_fast (hash
, bucket
, &mi
->real
, hv
, mi
);
136 mi
->did_real_hash
= true;
138 hash_bucket_unlock (bucket
);
143 dmsg (D_MULTI_DEBUG
, "MULTI TCP: instance added: %s", mroute_addr_print (&mi
->real
, &gc
));
145 dmsg (D_MULTI_DEBUG
, "MULTI TCP: new client instance failed");
149 ASSERT (!(mi
&& mi
->halt
));
154 multi_tcp_instance_specific_init (struct multi_context
*m
, struct multi_instance
*mi
)
156 /* buffer for queued TCP socket output packets */
157 mi
->tcp_link_out_deferred
= mbuf_init (m
->top
.options
.n_bcast_buf
);
159 ASSERT (mi
->context
.c2
.link_socket
);
160 ASSERT (mi
->context
.c2
.link_socket
->info
.lsa
);
161 ASSERT (mi
->context
.c2
.link_socket
->mode
== LS_MODE_TCP_ACCEPT_FROM
);
162 if (!mroute_extract_sockaddr_in (&mi
->real
, &mi
->context
.c2
.link_socket
->info
.lsa
->actual
, true))
164 msg (D_MULTI_ERRORS
, "MULTI TCP: TCP client address is undefined");
171 multi_tcp_instance_specific_free (struct multi_instance
*mi
)
173 mbuf_free (mi
->tcp_link_out_deferred
);
177 multi_tcp_init (int maxevents
, int *maxclients
)
179 struct multi_tcp
*mtcp
;
180 const int extra_events
= BASE_N_EVENTS
;
182 ASSERT (maxevents
>= 1);
185 ALLOC_OBJ_CLEAR (mtcp
, struct multi_tcp
);
186 mtcp
->maxevents
= maxevents
+ extra_events
;
187 mtcp
->es
= event_set_init (&mtcp
->maxevents
, 0);
188 wait_signal (mtcp
->es
, MTCP_SIG
);
189 ALLOC_ARRAY (mtcp
->esr
, struct event_set_return
, mtcp
->maxevents
);
190 *maxclients
= max_int (min_int (mtcp
->maxevents
- extra_events
, *maxclients
), 1);
191 msg (D_MULTI_LOW
, "MULTI: TCP INIT maxclients=%d maxevents=%d", *maxclients
, mtcp
->maxevents
);
196 multi_tcp_delete_event (struct multi_tcp
*mtcp
, event_t event
)
198 if (mtcp
&& mtcp
->es
)
199 event_del (mtcp
->es
, event
);
203 multi_tcp_free (struct multi_tcp
*mtcp
)
207 event_free (mtcp
->es
);
215 multi_tcp_dereference_instance (struct multi_tcp
*mtcp
, struct multi_instance
*mi
)
217 struct link_socket
*ls
= mi
->context
.c2
.link_socket
;
218 if (ls
&& mi
->socket_set_called
)
219 event_del (mtcp
->es
, socket_event_handle (ls
));
224 multi_tcp_set_global_rw_flags (struct multi_context
*m
, struct multi_instance
*mi
)
228 mi
->socket_set_called
= true;
229 socket_set (mi
->context
.c2
.link_socket
,
231 mbuf_defined (mi
->tcp_link_out_deferred
) ? EVENT_WRITE
: EVENT_READ
,
238 multi_tcp_wait (const struct context
*c
,
239 struct multi_tcp
*mtcp
)
242 socket_set_listen_persistent (c
->c2
.link_socket
, mtcp
->es
, MTCP_SOCKET
);
243 tun_set (c
->c1
.tuntap
, mtcp
->es
, EVENT_READ
, MTCP_TUN
, &mtcp
->tun_rwflags
);
244 #ifdef ENABLE_MANAGEMENT
246 management_socket_set (management
, mtcp
->es
, MTCP_MANAGEMENT
, &mtcp
->management_persist_flags
);
248 status
= event_wait (mtcp
->es
, &c
->c2
.timeval
, mtcp
->esr
, mtcp
->maxevents
);
252 mtcp
->n_esr
= status
;
256 static inline struct context
*
257 multi_tcp_context (struct multi_context
*m
, struct multi_instance
*mi
)
266 multi_tcp_process_outgoing_link_ready (struct multi_context
*m
, struct multi_instance
*mi
, const unsigned int mpp_flags
)
268 struct mbuf_item item
;
272 /* extract from queue */
273 if (mbuf_extract_item (mi
->tcp_link_out_deferred
, &item
, true)) /* ciphertext IP packet */
275 dmsg (D_MULTI_TCP
, "MULTI TCP: transmitting previously deferred packet");
277 ASSERT (mi
== item
.instance
);
278 mi
->context
.c2
.to_link
= item
.buffer
->buf
;
279 ret
= multi_process_outgoing_link_dowork (m
, mi
, mpp_flags
);
282 mbuf_free_buf (item
.buffer
);
288 multi_tcp_process_outgoing_link (struct multi_context
*m
, bool defer
, const unsigned int mpp_flags
)
290 struct multi_instance
*mi
= multi_process_outgoing_link_pre (m
);
295 if (defer
|| mbuf_defined (mi
->tcp_link_out_deferred
))
298 struct buffer
*buf
= &mi
->context
.c2
.to_link
;
301 struct mbuf_buffer
*mb
= mbuf_alloc_buf (buf
);
302 struct mbuf_item item
;
305 dmsg (D_MULTI_TCP
, "MULTI TCP: queuing deferred packet");
308 mbuf_add_item (mi
->tcp_link_out_deferred
, &item
);
311 ret
= multi_process_post (m
, mi
, mpp_flags
);
319 ret
= multi_process_outgoing_link_dowork (m
, mi
, mpp_flags
);
328 multi_tcp_wait_lite (struct multi_context
*m
, struct multi_instance
*mi
, const int action
, bool *tun_input_pending
)
330 struct context
*c
= multi_tcp_context (m
, mi
);
331 unsigned int looking_for
= 0;
333 dmsg (D_MULTI_DEBUG
, "MULTI TCP: multi_tcp_wait_lite a=%s mi=" ptr_format
,
337 tv_clear (&c
->c2
.timeval
); /* ZERO-TIMEOUT */
342 looking_for
= TUN_READ
;
343 tun_input_pending
= NULL
;
344 io_wait (c
, IOW_READ_TUN
);
347 looking_for
= SOCKET_READ
;
348 tun_input_pending
= NULL
;
349 io_wait (c
, IOW_READ_LINK
);
352 looking_for
= TUN_WRITE
;
353 tun_input_pending
= NULL
;
354 c
->c2
.timeval
.tv_sec
= 1; /* For some reason, the Linux 2.2 TUN/TAP driver hits this timeout */
355 perf_push (PERF_PROC_OUT_TUN_MTCP
);
356 io_wait (c
, IOW_TO_TUN
);
359 case TA_SOCKET_WRITE
:
360 looking_for
= SOCKET_WRITE
;
361 io_wait (c
, IOW_TO_LINK
|IOW_READ_TUN_FORCE
);
364 msg (M_FATAL
, "MULTI TCP: multi_tcp_wait_lite, unhandled action=%d", action
);
367 if (tun_input_pending
&& (c
->c2
.event_set_status
& TUN_READ
))
368 *tun_input_pending
= true;
370 if (c
->c2
.event_set_status
& looking_for
)
378 /* TCP socket output buffer is full */
379 case TA_SOCKET_WRITE
:
380 return TA_SOCKET_WRITE_DEFERRED
;
382 /* TUN device timed out on accepting write */
384 return TA_TUN_WRITE_TIMEOUT
;
391 static struct multi_instance
*
392 multi_tcp_dispatch (struct multi_context
*m
, struct multi_instance
*mi
, const int action
)
394 const unsigned int mpp_flags
= MPP_PRE_SELECT
|MPP_RECORD_TOUCH
;
395 struct multi_instance
*touched
= mi
;
396 m
->mpp_touched
= &touched
;
398 dmsg (D_MULTI_DEBUG
, "MULTI TCP: multi_tcp_dispatch a=%s mi=" ptr_format
,
405 read_incoming_tun (&m
->top
);
406 if (!IS_SIG (&m
->top
))
407 multi_process_incoming_tun (m
, mpp_flags
);
410 case TA_SOCKET_READ_RESIDUAL
:
412 ASSERT (mi
->context
.c2
.link_socket
);
414 read_incoming_link (&mi
->context
);
416 if (!IS_SIG (&mi
->context
))
418 multi_process_incoming_link (m
, mi
, mpp_flags
);
419 if (!IS_SIG (&mi
->context
))
420 stream_buf_read_setup (mi
->context
.c2
.link_socket
);
424 multi_process_timeout (m
, mpp_flags
);
427 multi_process_outgoing_tun (m
, mpp_flags
);
429 case TA_TUN_WRITE_TIMEOUT
:
430 multi_process_drop_outgoing_tun (m
, mpp_flags
);
432 case TA_SOCKET_WRITE_READY
:
434 multi_tcp_process_outgoing_link_ready (m
, mi
, mpp_flags
);
436 case TA_SOCKET_WRITE
:
437 multi_tcp_process_outgoing_link (m
, false, mpp_flags
);
439 case TA_SOCKET_WRITE_DEFERRED
:
440 multi_tcp_process_outgoing_link (m
, true, mpp_flags
);
444 multi_tcp_set_global_rw_flags (m
, mi
);
445 multi_process_post (m
, mi
, mpp_flags
);
448 msg (M_FATAL
, "MULTI TCP: multi_tcp_dispatch, unhandled action=%d", action
);
451 m
->mpp_touched
= NULL
;
456 multi_tcp_post (struct multi_context
*m
, struct multi_instance
*mi
, const int action
)
458 struct context
*c
= multi_tcp_context (m
, mi
);
459 int newaction
= TA_UNDEF
;
462 # define MTP_TUN_OUT (1<<0)
463 # define MTP_LINK_OUT (1<<1)
464 unsigned int flags
= MTP_NONE
;
467 flags
|= MTP_TUN_OUT
;
469 flags
|= MTP_LINK_OUT
;
473 case MTP_TUN_OUT
|MTP_LINK_OUT
:
475 newaction
= TA_TUN_WRITE
;
478 newaction
= TA_SOCKET_WRITE
;
481 if (mi
&& socket_read_residual (c
->c2
.link_socket
))
482 newaction
= TA_SOCKET_READ_RESIDUAL
;
484 multi_tcp_set_global_rw_flags (m
, mi
);
488 struct gc_arena gc
= gc_new ();
489 msg (M_FATAL
, "MULTI TCP: multi_tcp_post bad state, mi=%s flags=%d",
490 multi_instance_string (mi
, false, &gc
),
497 dmsg (D_MULTI_DEBUG
, "MULTI TCP: multi_tcp_post %s -> %s",
505 multi_tcp_action (struct multi_context
*m
, struct multi_instance
*mi
, int action
, bool poll
)
507 bool tun_input_pending
= false;
510 dmsg (D_MULTI_DEBUG
, "MULTI TCP: multi_tcp_action a=%s p=%d",
515 * If TA_SOCKET_READ_RESIDUAL, it means we still have pending
516 * input packets which were read by a prior TCP recv.
518 * Otherwise do a "lite" wait, which means we wait with 0 timeout
519 * on I/O events only related to the current instance, not
520 * the big list of events.
522 * On our first pass, poll will be false because we already know
523 * that input is available, and to call io_wait would be redundant.
525 if (poll
&& action
!= TA_SOCKET_READ_RESIDUAL
)
527 const int orig_action
= action
;
528 action
= multi_tcp_wait_lite (m
, mi
, action
, &tun_input_pending
);
529 if (action
== TA_UNDEF
)
530 msg (M_FATAL
, "MULTI TCP: I/O wait required blocking in multi_tcp_action, action=%d", orig_action
);
534 * Dispatch the action
537 struct multi_instance
*touched
= multi_tcp_dispatch (m
, mi
, action
);
540 * Signal received or TCP connection
543 if (touched
&& IS_SIG (&touched
->context
))
547 multi_close_instance_on_signal (m
, touched
);
552 * If dispatch produced any pending output
553 * for a particular instance, point to
560 * Based on the effects of the action,
561 * such as generating pending output,
562 * possibly transition to a new action state.
564 action
= multi_tcp_post (m
, mi
, action
);
567 * If we are finished processing the original action,
568 * check if we have any TUN input. If so, transition
569 * our action state to processing this input.
571 if (tun_input_pending
&& action
== TA_UNDEF
)
573 action
= TA_TUN_READ
;
575 tun_input_pending
= false;
581 } while (action
!= TA_UNDEF
);
585 multi_tcp_process_io (struct multi_context
*m
)
587 struct multi_tcp
*mtcp
= m
->mtcp
;
590 for (i
= 0; i
< mtcp
->n_esr
; ++i
)
592 struct event_set_return
*e
= &mtcp
->esr
[i
];
594 /* incoming data for instance? */
595 if (e
->arg
>= MTCP_N
)
597 struct multi_instance
*mi
= (struct multi_instance
*) e
->arg
;
600 if (e
->rwflags
& EVENT_WRITE
)
601 multi_tcp_action (m
, mi
, TA_SOCKET_WRITE_READY
, false);
602 else if (e
->rwflags
& EVENT_READ
)
603 multi_tcp_action (m
, mi
, TA_SOCKET_READ
, false);
608 #ifdef ENABLE_MANAGEMENT
609 if (e
->arg
== MTCP_MANAGEMENT
)
612 management_io (management
);
616 /* incoming data on TUN? */
617 if (e
->arg
== MTCP_TUN
)
619 if (e
->rwflags
& EVENT_WRITE
)
620 multi_tcp_action (m
, NULL
, TA_TUN_WRITE
, false);
621 else if (e
->rwflags
& EVENT_READ
)
622 multi_tcp_action (m
, NULL
, TA_TUN_READ
, false);
624 /* new incoming TCP client attempting to connect? */
625 else if (e
->arg
== MTCP_SOCKET
)
627 struct multi_instance
*mi
;
628 ASSERT (m
->top
.c2
.link_socket
);
629 socket_reset_listen_persistent (m
->top
.c2
.link_socket
);
630 mi
= multi_create_instance_tcp (m
);
632 multi_tcp_action (m
, mi
, TA_INITIAL
, false);
634 /* signal received? */
635 else if (e
->arg
== MTCP_SIG
)
637 get_signal (&m
->top
.sig
->signal_received
);
640 if (IS_SIG (&m
->top
))
646 * Process queued mbuf packets destined for TCP socket
649 struct multi_instance
*mi
;
650 while (!IS_SIG (&m
->top
) && (mi
= mbuf_peek (m
->mbuf
)) != NULL
)
652 multi_tcp_action (m
, mi
, TA_SOCKET_WRITE
, true);
658 * Top level event loop for single-threaded operation.
662 tunnel_server_tcp (struct context
*top
)
664 struct multi_context multi
;
668 context_clear_2 (top
);
670 /* initialize top-tunnel instance */
671 init_instance_handle_signals (top
, top
->es
, CC_HARD_USR1_TO_HUP
);
675 /* initialize global multi_context object */
676 multi_init (&multi
, top
, true, MC_SINGLE_THREADED
);
678 /* initialize our cloned top object */
679 multi_top_init (&multi
, top
, true);
681 /* initialize management interface */
682 init_management_callback_multi (&multi
);
684 /* finished with initialization */
685 initialization_sequence_completed (top
, ISC_SERVER
); /* --mode server --proto tcp-server */
687 /* per-packet event loop */
690 perf_push (PERF_EVENT_LOOP
);
692 /* wait on tun/socket list */
693 multi_get_timeout (&multi
, &multi
.top
.c2
.timeval
);
694 status
= multi_tcp_wait (&multi
.top
, multi
.mtcp
);
695 MULTI_CHECK_SIG (&multi
);
697 /* check on status of coarse timers */
698 multi_process_per_second_timers (&multi
);
703 /* process the I/O which triggered select */
704 multi_tcp_process_io (&multi
);
705 MULTI_CHECK_SIG (&multi
);
707 else if (status
== 0)
709 multi_tcp_action (&multi
, NULL
, TA_TIMEOUT
, false);
715 /* shut down management interface */
716 uninit_management_callback_multi (&multi
);
718 /* save ifconfig-pool */
719 multi_ifconfig_pool_persist (&multi
, true);
721 /* tear down tunnel instance (unless --persist-tun) */
722 multi_uninit (&multi
);
723 multi_top_free (&multi
);
724 close_instance (top
);