2 * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/types.h>
29 #include "event2/event-config.h"
33 /* Minimum required for InitializeCriticalSectionAndSpinCount */
34 #define _WIN32_WINNT 0x0403
41 #ifdef _EVENT_HAVE_SYS_SOCKET_H
42 #include <sys/socket.h>
44 #ifdef _EVENT_HAVE_FCNTL_H
47 #ifdef _EVENT_HAVE_UNISTD_H
51 #include "event2/listener.h"
52 #include "event2/util.h"
53 #include "event2/event.h"
54 #include "event2/event_struct.h"
55 #include "mm-internal.h"
56 #include "util-internal.h"
57 #include "log-internal.h"
58 #include "evthread-internal.h"
60 #include "iocp-internal.h"
61 #include "defer-internal.h"
62 #include "event-internal.h"
65 struct evconnlistener_ops
{
66 int (*enable
)(struct evconnlistener
*);
67 int (*disable
)(struct evconnlistener
*);
68 void (*destroy
)(struct evconnlistener
*);
69 void (*shutdown
)(struct evconnlistener
*);
70 evutil_socket_t (*getfd
)(struct evconnlistener
*);
71 struct event_base
*(*getbase
)(struct evconnlistener
*);
74 struct evconnlistener
{
75 const struct evconnlistener_ops
*ops
;
78 evconnlistener_errorcb errorcb
;
85 struct evconnlistener_event
{
86 struct evconnlistener base
;
87 struct event listener
;
91 struct evconnlistener_iocp
{
92 struct evconnlistener base
;
94 struct event_base
*event_base
;
95 struct event_iocp_port
*port
;
97 unsigned shutting_down
: 1;
98 unsigned event_added
: 1;
99 struct accepting_socket
**accepting
;
/* Lock helpers; no-ops unless LEV_OPT_THREADSAFE allocated listener->lock. */
#define LOCK(listener) EVLOCK_LOCK((listener)->lock, 0)
#define UNLOCK(listener) EVLOCK_UNLOCK((listener)->lock, 0)
106 struct evconnlistener
*
107 evconnlistener_new_async(struct event_base
*base
,
108 evconnlistener_cb cb
, void *ptr
, unsigned flags
, int backlog
,
109 evutil_socket_t fd
); /* XXXX export this? */
111 static int event_listener_enable(struct evconnlistener
*);
112 static int event_listener_disable(struct evconnlistener
*);
113 static void event_listener_destroy(struct evconnlistener
*);
114 static evutil_socket_t
event_listener_getfd(struct evconnlistener
*);
115 static struct event_base
*event_listener_getbase(struct evconnlistener
*);
119 listener_incref_and_lock(struct evconnlistener
*listener
)
127 listener_decref_and_unlock(struct evconnlistener
*listener
)
129 int refcnt
= --listener
->refcnt
;
131 listener
->ops
->destroy(listener
);
133 EVTHREAD_FREE_LOCK(listener
->lock
, EVTHREAD_LOCKTYPE_RECURSIVE
);
142 static const struct evconnlistener_ops evconnlistener_event_ops
= {
143 event_listener_enable
,
144 event_listener_disable
,
145 event_listener_destroy
,
147 event_listener_getfd
,
148 event_listener_getbase
151 static void listener_read_cb(evutil_socket_t
, short, void *);
153 struct evconnlistener
*
154 evconnlistener_new(struct event_base
*base
,
155 evconnlistener_cb cb
, void *ptr
, unsigned flags
, int backlog
,
158 struct evconnlistener_event
*lev
;
161 if (base
&& event_base_get_iocp(base
)) {
162 const struct win32_extension_fns
*ext
=
163 event_get_win32_extension_fns();
164 if (ext
->AcceptEx
&& ext
->GetAcceptExSockaddrs
)
165 return evconnlistener_new_async(base
, cb
, ptr
, flags
,
171 if (listen(fd
, backlog
) < 0)
173 } else if (backlog
< 0) {
174 if (listen(fd
, 128) < 0)
178 lev
= mm_calloc(1, sizeof(struct evconnlistener_event
));
182 lev
->base
.ops
= &evconnlistener_event_ops
;
184 lev
->base
.user_data
= ptr
;
185 lev
->base
.flags
= flags
;
186 lev
->base
.refcnt
= 1;
188 if (flags
& LEV_OPT_THREADSAFE
) {
189 EVTHREAD_ALLOC_LOCK(lev
->base
.lock
, EVTHREAD_LOCKTYPE_RECURSIVE
);
192 event_assign(&lev
->listener
, base
, fd
, EV_READ
|EV_PERSIST
,
193 listener_read_cb
, lev
);
195 evconnlistener_enable(&lev
->base
);
200 struct evconnlistener
*
201 evconnlistener_new_bind(struct event_base
*base
, evconnlistener_cb cb
,
202 void *ptr
, unsigned flags
, int backlog
, const struct sockaddr
*sa
,
205 struct evconnlistener
*listener
;
208 int family
= sa
? sa
->sa_family
: AF_UNSPEC
;
213 fd
= socket(family
, SOCK_STREAM
, 0);
217 if (evutil_make_socket_nonblocking(fd
) < 0) {
218 evutil_closesocket(fd
);
222 if (flags
& LEV_OPT_CLOSE_ON_EXEC
) {
223 if (evutil_make_socket_closeonexec(fd
) < 0) {
224 evutil_closesocket(fd
);
229 setsockopt(fd
, SOL_SOCKET
, SO_KEEPALIVE
, (void*)&on
, sizeof(on
));
230 if (flags
& LEV_OPT_REUSEABLE
) {
231 evutil_make_listen_socket_reuseable(fd
);
235 if (bind(fd
, sa
, socklen
)<0) {
236 evutil_closesocket(fd
);
241 listener
= evconnlistener_new(base
, cb
, ptr
, flags
, backlog
, fd
);
243 evutil_closesocket(fd
);
251 evconnlistener_free(struct evconnlistener
*lev
)
256 if (lev
->ops
->shutdown
)
257 lev
->ops
->shutdown(lev
);
258 listener_decref_and_unlock(lev
);
262 event_listener_destroy(struct evconnlistener
*lev
)
264 struct evconnlistener_event
*lev_e
=
265 EVUTIL_UPCAST(lev
, struct evconnlistener_event
, base
);
267 event_del(&lev_e
->listener
);
268 if (lev
->flags
& LEV_OPT_CLOSE_ON_FREE
)
269 evutil_closesocket(event_get_fd(&lev_e
->listener
));
270 event_debug_unassign(&lev_e
->listener
);
274 evconnlistener_enable(struct evconnlistener
*lev
)
280 r
= lev
->ops
->enable(lev
);
288 evconnlistener_disable(struct evconnlistener
*lev
)
293 r
= lev
->ops
->disable(lev
);
299 event_listener_enable(struct evconnlistener
*lev
)
301 struct evconnlistener_event
*lev_e
=
302 EVUTIL_UPCAST(lev
, struct evconnlistener_event
, base
);
303 return event_add(&lev_e
->listener
, NULL
);
307 event_listener_disable(struct evconnlistener
*lev
)
309 struct evconnlistener_event
*lev_e
=
310 EVUTIL_UPCAST(lev
, struct evconnlistener_event
, base
);
311 return event_del(&lev_e
->listener
);
315 evconnlistener_get_fd(struct evconnlistener
*lev
)
319 fd
= lev
->ops
->getfd(lev
);
324 static evutil_socket_t
325 event_listener_getfd(struct evconnlistener
*lev
)
327 struct evconnlistener_event
*lev_e
=
328 EVUTIL_UPCAST(lev
, struct evconnlistener_event
, base
);
329 return event_get_fd(&lev_e
->listener
);
333 evconnlistener_get_base(struct evconnlistener
*lev
)
335 struct event_base
*base
;
337 base
= lev
->ops
->getbase(lev
);
342 static struct event_base
*
343 event_listener_getbase(struct evconnlistener
*lev
)
345 struct evconnlistener_event
*lev_e
=
346 EVUTIL_UPCAST(lev
, struct evconnlistener_event
, base
);
347 return event_get_base(&lev_e
->listener
);
351 evconnlistener_set_cb(struct evconnlistener
*lev
,
352 evconnlistener_cb cb
, void *arg
)
356 if (lev
->enabled
&& !lev
->cb
)
359 lev
->user_data
= arg
;
361 evconnlistener_enable(lev
);
366 evconnlistener_set_error_cb(struct evconnlistener
*lev
,
367 evconnlistener_errorcb errorcb
)
370 lev
->errorcb
= errorcb
;
375 listener_read_cb(evutil_socket_t fd
, short what
, void *p
)
377 struct evconnlistener
*lev
= p
;
379 evconnlistener_cb cb
;
380 evconnlistener_errorcb errorcb
;
384 struct sockaddr_storage ss
;
386 int socklen
= sizeof(ss
);
388 socklen_t socklen
= sizeof(ss
);
390 evutil_socket_t new_fd
= accept(fd
, (struct sockaddr
*)&ss
, &socklen
);
394 /* This can happen with some older linux kernels in
395 * response to nmap. */
396 evutil_closesocket(new_fd
);
400 if (!(lev
->flags
& LEV_OPT_LEAVE_SOCKETS_BLOCKING
))
401 evutil_make_socket_nonblocking(new_fd
);
403 if (lev
->cb
== NULL
) {
409 user_data
= lev
->user_data
;
411 cb(lev
, new_fd
, (struct sockaddr
*)&ss
, (int)socklen
,
414 if (lev
->refcnt
== 1) {
415 int freed
= listener_decref_and_unlock(lev
);
416 EVUTIL_ASSERT(freed
);
421 err
= evutil_socket_geterror(fd
);
422 if (EVUTIL_ERR_ACCEPT_RETRIABLE(err
)) {
426 if (lev
->errorcb
!= NULL
) {
428 errorcb
= lev
->errorcb
;
429 user_data
= lev
->user_data
;
431 errorcb(lev
, user_data
);
433 listener_decref_and_unlock(lev
);
435 event_sock_warn(fd
, "Error from accept() call");
440 struct accepting_socket
{
441 CRITICAL_SECTION lock
;
442 struct event_overlapped overlapped
;
445 struct deferred_cb deferred
;
446 struct evconnlistener_iocp
*lev
;
449 unsigned free_on_cb
:1;
453 static void accepted_socket_cb(struct event_overlapped
*o
, ev_uintptr_t key
,
454 ev_ssize_t n
, int ok
);
455 static void accepted_socket_invoke_user_cb(struct deferred_cb
*cb
, void *arg
);
458 iocp_listener_event_add(struct evconnlistener_iocp
*lev
)
460 if (lev
->event_added
)
463 lev
->event_added
= 1;
464 event_base_add_virtual(lev
->event_base
);
468 iocp_listener_event_del(struct evconnlistener_iocp
*lev
)
470 if (!lev
->event_added
)
473 lev
->event_added
= 0;
474 event_base_del_virtual(lev
->event_base
);
477 static struct accepting_socket
*
478 new_accepting_socket(struct evconnlistener_iocp
*lev
, int family
)
480 struct accepting_socket
*res
;
484 if (family
== AF_INET
)
485 addrlen
= sizeof(struct sockaddr_in
);
486 else if (family
== AF_INET6
)
487 addrlen
= sizeof(struct sockaddr_in6
);
490 buflen
= (addrlen
+16)*2;
492 res
= mm_calloc(1,sizeof(struct accepting_socket
)-1+buflen
);
496 event_overlapped_init(&res
->overlapped
, accepted_socket_cb
);
497 res
->s
= INVALID_SOCKET
;
499 res
->buflen
= buflen
;
500 res
->family
= family
;
502 event_deferred_cb_init(&res
->deferred
,
503 accepted_socket_invoke_user_cb
, res
);
505 InitializeCriticalSectionAndSpinCount(&res
->lock
, 1000);
511 free_and_unlock_accepting_socket(struct accepting_socket
*as
)
514 if (as
->s
!= INVALID_SOCKET
)
517 LeaveCriticalSection(&as
->lock
);
518 DeleteCriticalSection(&as
->lock
);
523 start_accepting(struct accepting_socket
*as
)
526 const struct win32_extension_fns
*ext
= event_get_win32_extension_fns();
528 SOCKET s
= socket(as
->family
, SOCK_STREAM
, 0);
531 if (!as
->lev
->base
.enabled
)
534 if (s
== INVALID_SOCKET
) {
535 error
= WSAGetLastError();
539 /* XXXX It turns out we need to do this again later. Does this call
540 * have any effect? */
541 setsockopt(s
, SOL_SOCKET
, SO_UPDATE_ACCEPT_CONTEXT
,
542 (char *)&as
->lev
->fd
, sizeof(&as
->lev
->fd
));
544 if (!(as
->lev
->base
.flags
& LEV_OPT_LEAVE_SOCKETS_BLOCKING
))
545 evutil_make_socket_nonblocking(s
);
547 if (event_iocp_port_associate(as
->lev
->port
, s
, 1) < 0) {
554 if (ext
->AcceptEx(as
->lev
->fd
, s
, as
->addrbuf
, 0,
555 as
->buflen
/2, as
->buflen
/2, &pending
, &as
->overlapped
.overlapped
))
557 /* Immediate success! */
558 accepted_socket_cb(&as
->overlapped
, 1, 0, 1);
560 error
= WSAGetLastError();
561 if (error
!= ERROR_IO_PENDING
) {
570 event_deferred_cb_schedule(
571 event_base_get_deferred_cb_queue(as
->lev
->event_base
),
577 stop_accepting(struct accepting_socket
*as
)
581 as
->s
= INVALID_SOCKET
;
586 accepted_socket_invoke_user_cb(struct deferred_cb
*dcb
, void *arg
)
588 struct accepting_socket
*as
= arg
;
590 struct sockaddr
*sa_local
=NULL
, *sa_remote
=NULL
;
591 int socklen_local
=0, socklen_remote
=0;
592 const struct win32_extension_fns
*ext
= event_get_win32_extension_fns();
593 struct evconnlistener
*lev
= &as
->lev
->base
;
594 evutil_socket_t sock
=-1;
596 evconnlistener_cb cb
=NULL
;
597 evconnlistener_errorcb errorcb
=NULL
;
600 EVUTIL_ASSERT(ext
->GetAcceptExSockaddrs
);
603 EnterCriticalSection(&as
->lock
);
604 if (as
->free_on_cb
) {
605 free_and_unlock_accepting_socket(as
);
606 listener_decref_and_unlock(lev
);
615 errorcb
= lev
->errorcb
;
617 ext
->GetAcceptExSockaddrs(
618 as
->addrbuf
, 0, as
->buflen
/2, as
->buflen
/2,
619 &sa_local
, &socklen_local
, &sa_remote
,
623 as
->s
= INVALID_SOCKET
;
625 /* We need to call this so getsockname, getpeername, and
626 * shutdown work correctly on the accepted socket. */
627 /* XXXX handle error? */
628 setsockopt(sock
, SOL_SOCKET
, SO_UPDATE_ACCEPT_CONTEXT
,
629 (char *)&as
->lev
->fd
, sizeof(&as
->lev
->fd
));
631 data
= lev
->user_data
;
633 LeaveCriticalSection(&as
->lock
);
637 WSASetLastError(error
);
640 cb(lev
, sock
, sa_remote
, socklen_remote
, data
);
644 if (listener_decref_and_unlock(lev
))
647 EnterCriticalSection(&as
->lock
);
649 LeaveCriticalSection(&as
->lock
);
653 accepted_socket_cb(struct event_overlapped
*o
, ev_uintptr_t key
, ev_ssize_t n
, int ok
)
655 struct accepting_socket
*as
=
656 EVUTIL_UPCAST(o
, struct accepting_socket
, overlapped
);
658 LOCK(&as
->lev
->base
);
659 EnterCriticalSection(&as
->lock
);
661 /* XXXX Don't do this if some EV_MT flag is set. */
662 event_deferred_cb_schedule(
663 event_base_get_deferred_cb_queue(as
->lev
->event_base
),
665 LeaveCriticalSection(&as
->lock
);
666 } else if (as
->free_on_cb
) {
667 struct evconnlistener
*lev
= &as
->lev
->base
;
668 free_and_unlock_accepting_socket(as
);
669 listener_decref_and_unlock(lev
);
671 } else if (as
->s
== INVALID_SOCKET
) {
672 /* This is okay; we were disabled by iocp_listener_disable. */
673 LeaveCriticalSection(&as
->lock
);
675 /* Some error on accept that we couldn't actually handle. */
677 DWORD transfer
= 0, flags
=0;
678 event_sock_warn(as
->s
, "Unexpected error on AcceptEx");
679 ok
= WSAGetOverlappedResult(as
->s
, &o
->overlapped
,
680 &transfer
, FALSE
, &flags
);
682 /* well, that was confusing! */
685 as
->error
= WSAGetLastError();
687 event_deferred_cb_schedule(
688 event_base_get_deferred_cb_queue(as
->lev
->event_base
),
690 LeaveCriticalSection(&as
->lock
);
692 UNLOCK(&as
->lev
->base
);
696 iocp_listener_enable(struct evconnlistener
*lev
)
699 struct evconnlistener_iocp
*lev_iocp
=
700 EVUTIL_UPCAST(lev
, struct evconnlistener_iocp
, base
);
703 iocp_listener_event_add(lev_iocp
);
704 for (i
= 0; i
< lev_iocp
->n_accepting
; ++i
) {
705 struct accepting_socket
*as
= lev_iocp
->accepting
[i
];
708 EnterCriticalSection(&as
->lock
);
709 if (!as
->free_on_cb
&& as
->s
== INVALID_SOCKET
)
711 LeaveCriticalSection(&as
->lock
);
718 iocp_listener_disable_impl(struct evconnlistener
*lev
, int shutdown
)
721 struct evconnlistener_iocp
*lev_iocp
=
722 EVUTIL_UPCAST(lev
, struct evconnlistener_iocp
, base
);
725 iocp_listener_event_del(lev_iocp
);
726 for (i
= 0; i
< lev_iocp
->n_accepting
; ++i
) {
727 struct accepting_socket
*as
= lev_iocp
->accepting
[i
];
730 EnterCriticalSection(&as
->lock
);
731 if (!as
->free_on_cb
&& as
->s
!= INVALID_SOCKET
) {
736 LeaveCriticalSection(&as
->lock
);
/* Backend disable entry point (non-shutdown variant). */
static int
iocp_listener_disable(struct evconnlistener *lev)
{
	return iocp_listener_disable_impl(lev,0);
}
749 iocp_listener_destroy(struct evconnlistener
*lev
)
751 struct evconnlistener_iocp
*lev_iocp
=
752 EVUTIL_UPCAST(lev
, struct evconnlistener_iocp
, base
);
754 if (! lev_iocp
->shutting_down
) {
755 lev_iocp
->shutting_down
= 1;
756 iocp_listener_disable_impl(lev
,1);
761 static evutil_socket_t
762 iocp_listener_getfd(struct evconnlistener
*lev
)
764 struct evconnlistener_iocp
*lev_iocp
=
765 EVUTIL_UPCAST(lev
, struct evconnlistener_iocp
, base
);
768 static struct event_base
*
769 iocp_listener_getbase(struct evconnlistener
*lev
)
771 struct evconnlistener_iocp
*lev_iocp
=
772 EVUTIL_UPCAST(lev
, struct evconnlistener_iocp
, base
);
773 return lev_iocp
->event_base
;
776 static const struct evconnlistener_ops evconnlistener_iocp_ops
= {
777 iocp_listener_enable
,
778 iocp_listener_disable
,
779 iocp_listener_destroy
,
780 iocp_listener_destroy
, /* shutdown */
782 iocp_listener_getbase
/* XXX define some way to override this. */
#define N_SOCKETS_PER_LISTENER 4
788 struct evconnlistener
*
789 evconnlistener_new_async(struct event_base
*base
,
790 evconnlistener_cb cb
, void *ptr
, unsigned flags
, int backlog
,
793 struct sockaddr_storage ss
;
794 int socklen
= sizeof(ss
);
795 struct evconnlistener_iocp
*lev
;
798 flags
|= LEV_OPT_THREADSAFE
;
800 if (!base
|| !event_base_get_iocp(base
))
803 /* XXXX duplicate code */
805 if (listen(fd
, backlog
) < 0)
807 } else if (backlog
< 0) {
808 if (listen(fd
, 128) < 0)
811 if (getsockname(fd
, (struct sockaddr
*)&ss
, &socklen
)) {
812 event_sock_warn(fd
, "getsockname");
815 lev
= mm_calloc(1, sizeof(struct evconnlistener_iocp
));
817 event_warn("calloc");
820 lev
->base
.ops
= &evconnlistener_iocp_ops
;
822 lev
->base
.user_data
= ptr
;
823 lev
->base
.flags
= flags
;
824 lev
->base
.refcnt
= 1;
825 lev
->base
.enabled
= 1;
827 lev
->port
= event_base_get_iocp(base
);
829 lev
->event_base
= base
;
832 if (event_iocp_port_associate(lev
->port
, fd
, 1) < 0)
835 EVTHREAD_ALLOC_LOCK(lev
->base
.lock
, EVTHREAD_LOCKTYPE_RECURSIVE
);
837 lev
->n_accepting
= N_SOCKETS_PER_LISTENER
;
838 lev
->accepting
= mm_calloc(lev
->n_accepting
,
839 sizeof(struct accepting_socket
*));
840 if (!lev
->accepting
) {
841 event_warn("calloc");
842 goto err_delete_lock
;
844 for (i
= 0; i
< lev
->n_accepting
; ++i
) {
845 lev
->accepting
[i
] = new_accepting_socket(lev
, ss
.ss_family
);
846 if (!lev
->accepting
[i
]) {
847 event_warnx("Couldn't create accepting socket");
848 goto err_free_accepting
;
850 if (cb
&& start_accepting(lev
->accepting
[i
]) < 0) {
851 event_warnx("Couldn't start accepting on socket");
852 EnterCriticalSection(&lev
->accepting
[i
]->lock
);
853 free_and_unlock_accepting_socket(lev
->accepting
[i
]);
854 goto err_free_accepting
;
859 iocp_listener_event_add(lev
);
864 mm_free(lev
->accepting
);
865 /* XXXX free the other elements. */
867 EVTHREAD_FREE_LOCK(lev
->base
.lock
, EVTHREAD_LOCKTYPE_RECURSIVE
);
871 /* Don't close the fd, it is caller's responsibility. */