1 /* SPDX-License-Identifier: BSD-2-Clause */
3 * eloop - portable event based main loop.
4 * Copyright (c) 2006-2023 Roy Marples <roy@marples.name>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Basically for a small number of fd's (total, not max fd)
31 * of say a few hundred, ppoll(2) performs just fine, if not faster than others.
32 * It also has the smallest memory and binary size footprint.
33 * ppoll(2) is available on all modern OS my software runs on and should be
34 * an up and coming POSIX standard interface.
35 * If ppoll is not available, then pselect(2) can be used instead which has
36 * even smaller memory and binary size footprint.
37 * However, this difference is quite tiny and the ppoll API is superior.
38 * pselect cannot return error conditions such as EOF for example.
40 * Both epoll(7) and kqueue(2) require an extra fd per process to manage
41 * their respective list of interest AND syscalls to manage it.
42 * So for a small number of fd's, these are more resource intensive,
43 * especially when used with more than one process.
45 * epoll avoids the resource limit RLIMIT_NOFILE Linux poll stupidly applies.
46 * kqueue avoids the same limit on OpenBSD.
47 * ppoll can still be secured in both by using SEECOMP or pledge.
49 * kqueue can avoid the signal trick we use here so that we can make
50 * function calls other than those listed in sigaction(2) in our signal
51 * handlers, which is probably more robust than ours at surviving a signal storm.
52 * signalfd(2) is available for Linux which probably works in a similar way
53 * but it's yet another fd to use.
55 * Taking this all into account, ppoll(2) is the default mechanism used here.
58 #if (defined(__unix__) || defined(unix)) && !defined(USG)
59 #include <sys/param.h>
75 /* config.h should define HAVE_PPOLL, etc. */
76 #if defined(HAVE_CONFIG_H) && !defined(NO_CONFIG_H)
80 /* Prioritise which mechanism we want to use.*/
81 #if defined(HAVE_PPOLL)
85 #elif defined(HAVE_POLLTS)
91 #elif defined(HAVE_KQUEUE)
94 #elif defined(HAVE_EPOLL)
97 #elif !defined(HAVE_PSELECT)
101 #if defined(HAVE_KQUEUE)
102 #include <sys/event.h>
103 #if defined(__DragonFly__) || defined(__FreeBSD__)
104 #define _kevent(kq, cl, ncl, el, nel, t) \
105 kevent((kq), (cl), (int)(ncl), (el), (int)(nel), (t))
107 #define _kevent kevent
110 #elif defined(HAVE_EPOLL)
111 #include <sys/epoll.h>
113 #elif defined(HAVE_PPOLL)
116 #elif defined(HAVE_PSELECT)
117 #include <sys/select.h>
123 #define UNUSED(a) (void)((a))
127 #define __unused __attribute__((__unused__))
133 /* Our structures require TAILQ macros, which really every libc should
134 * ship as they are useful beyond belief.
135 * Sadly some libc's don't have sys/queue.h and some that do don't have
136 * the TAILQ_FOREACH macro. For those that don't, the application using
137 * this implementation will need to ship a working queue.h somewhere.
138 * If we don't have sys/queue.h found in config.h, then
139 * allow QUEUE_H to override loading queue.h in the current directory. */
140 #ifndef TAILQ_FOREACH
141 #ifdef HAVE_SYS_QUEUE_H
142 #include <sys/queue.h>
143 #elif defined(QUEUE_H)
144 #define __QUEUE_HEADER(x) #x
145 #define _QUEUE_HEADER(x) __QUEUE_HEADER(x)
146 #include _QUEUE_HEADER(QUEUE_H)
157 # define __arraycount(__x) (sizeof(__x) / sizeof(__x[0]))
161 * Allow a backlog of signals.
162 * If you use many eloops in the same process, they should all
163 * use the same signal handler or have the signal handler unset.
164 * Otherwise the signal might not behave as expected.
166 #define ELOOP_NSIGNALS 5
169 * time_t is a signed integer of an unspecified size.
170 * To adjust for time_t wrapping, we need to work the maximum signed
171 * value and use that as a maximum.
174 #define TIME_MAX ((1ULL << (sizeof(time_t) * NBBY - 1)) - 1)
176 /* The unsigned maximum is then simple - multiply by two and add one. */
178 #define UTIME_MAX (TIME_MAX * 2) + 1
182 TAILQ_ENTRY(eloop_event
) next
;
184 void (*cb
)(void *, unsigned short);
186 unsigned short events
;
188 struct pollfd
*pollfd
;
192 struct eloop_timeout
{
193 TAILQ_ENTRY(eloop_timeout
) next
;
194 unsigned int seconds
;
195 unsigned int nseconds
;
196 void (*callback
)(void *);
202 TAILQ_HEAD (event_head
, eloop_event
) events
;
204 struct event_head free_events
;
207 TAILQ_HEAD (timeout_head
, eloop_timeout
) timeouts
;
208 struct timeout_head free_timeouts
;
212 void (*signal_cb
)(int, void *);
215 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
218 #if defined(HAVE_KQUEUE)
220 #elif defined(HAVE_EPOLL)
221 struct epoll_event
*fds
;
222 #elif defined(HAVE_PPOLL)
225 #if !defined(HAVE_PSELECT)
231 bool events_need_setup
;
#ifdef HAVE_REALLOCARRAY
#define	eloop_realloca	reallocarray
#else
/* Handy routine to check for potential overflow.
 * reallocarray(3) and reallocarr(3) are not portable. */
#define SQRT_SIZE_MAX (((size_t)1) << (sizeof(size_t) * CHAR_BIT / 2))
/*
 * realloc(3) with multiplication-overflow protection, ala reallocarray(3).
 * Returns NULL with errno set to EOVERFLOW if n * size would overflow,
 * otherwise behaves exactly like realloc(ptr, n * size).
 * FIX: guard the division with size != 0 — the previous test evaluated
 * n > SIZE_MAX / size whenever either operand reached SQRT_SIZE_MAX,
 * which is division by zero (undefined behaviour) when size == 0.
 */
static void *
eloop_realloca(void *ptr, size_t n, size_t size)
{

	if ((n | size) >= SQRT_SIZE_MAX &&
	    size != 0 && n > SIZE_MAX / size)
	{
		errno = EOVERFLOW;
		return NULL;
	}
	return realloc(ptr, n * size);
}
#endif
255 eloop_event_setup_fds(struct eloop
*eloop
)
257 struct eloop_event
*e
, *ne
;
258 #if defined(HAVE_KQUEUE)
260 size_t nfds
= eloop
->nsignals
;
261 #elif defined(HAVE_EPOLL)
262 struct epoll_event
*pfd
;
264 #elif defined(HAVE_PPOLL)
270 nfds
+= eloop
->nevents
* NFD
;
271 if (eloop
->nfds
< nfds
) {
272 pfd
= eloop_realloca(eloop
->fds
, nfds
, sizeof(*pfd
));
283 TAILQ_FOREACH_SAFE(e
, &eloop
->events
, next
, ne
) {
285 TAILQ_REMOVE(&eloop
->events
, e
, next
);
286 TAILQ_INSERT_TAIL(&eloop
->free_events
, e
, next
);
293 if (e
->events
& ELE_READ
)
294 pfd
->events
|= POLLIN
;
295 if (e
->events
& ELE_WRITE
)
296 pfd
->events
|= POLLOUT
;
302 eloop
->events_need_setup
= false;
307 eloop_event_count(const struct eloop
*eloop
)
310 return eloop
->nevents
;
314 eloop_event_add(struct eloop
*eloop
, int fd
, unsigned short events
,
315 void (*cb
)(void *, unsigned short), void *cb_arg
)
317 struct eloop_event
*e
;
319 #if defined(HAVE_KQUEUE)
320 struct kevent ke
[2], *kep
= &ke
[0];
322 #elif defined(HAVE_EPOLL)
323 struct epoll_event epe
;
327 assert(eloop
!= NULL
);
328 assert(cb
!= NULL
&& cb_arg
!= NULL
);
329 if (fd
== -1 || !(events
& (ELE_READ
| ELE_WRITE
| ELE_HANGUP
))) {
334 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
341 e
= TAILQ_FIRST(&eloop
->free_events
);
343 TAILQ_REMOVE(&eloop
->free_events
, e
, next
);
345 e
= malloc(sizeof(*e
));
350 TAILQ_INSERT_HEAD(&eloop
->events
, e
, next
);
360 #if defined(HAVE_KQUEUE)
362 if (events
& ELE_READ
&& !(e
->events
& ELE_READ
))
363 EV_SET(kep
++, (uintptr_t)fd
, EVFILT_READ
, EV_ADD
, 0, 0, e
);
364 else if (!(events
& ELE_READ
) && e
->events
& ELE_READ
)
365 EV_SET(kep
++, (uintptr_t)fd
, EVFILT_READ
, EV_DELETE
, 0, 0, e
);
368 if (events
& ELE_WRITE
&& !(e
->events
& ELE_WRITE
))
369 EV_SET(kep
++, (uintptr_t)fd
, EVFILT_WRITE
, EV_ADD
, 0, 0, e
);
370 else if (!(events
& ELE_WRITE
) && e
->events
& ELE_WRITE
)
371 EV_SET(kep
++, (uintptr_t)fd
, EVFILT_WRITE
, EV_DELETE
, 0, 0, e
);
374 #ifdef EVFILT_PROCDESC
375 if (events
& ELE_HANGUP
)
376 EV_SET(kep
++, (uintptr_t)fd
, EVFILT_PROCDESC
, EV_ADD
,
381 if (n
!= 0 && _kevent(eloop
->fd
, ke
, n
, NULL
, 0, NULL
) == -1) {
383 TAILQ_REMOVE(&eloop
->events
, e
, next
);
384 TAILQ_INSERT_TAIL(&eloop
->free_events
, e
, next
);
388 #elif defined(HAVE_EPOLL)
389 memset(&epe
, 0, sizeof(epe
));
391 if (events
& ELE_READ
)
392 epe
.events
|= EPOLLIN
;
393 if (events
& ELE_WRITE
)
394 epe
.events
|= EPOLLOUT
;
395 op
= added
? EPOLL_CTL_ADD
: EPOLL_CTL_MOD
;
396 if (epe
.events
!= 0 && epoll_ctl(eloop
->fd
, op
, fd
, &epe
) == -1) {
398 TAILQ_REMOVE(&eloop
->events
, e
, next
);
399 TAILQ_INSERT_TAIL(&eloop
->free_events
, e
, next
);
403 #elif defined(HAVE_PPOLL)
410 eloop
->events_need_setup
= true;
415 eloop_event_delete(struct eloop
*eloop
, int fd
)
417 struct eloop_event
*e
;
418 #if defined(HAVE_KQUEUE)
419 struct kevent ke
[2], *kep
= &ke
[0];
423 assert(eloop
!= NULL
);
429 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
438 #if defined(HAVE_KQUEUE)
440 if (e
->events
& ELE_READ
) {
441 EV_SET(kep
++, (uintptr_t)fd
, EVFILT_READ
, EV_DELETE
, 0, 0, e
);
444 if (e
->events
& ELE_WRITE
) {
445 EV_SET(kep
++, (uintptr_t)fd
, EVFILT_WRITE
, EV_DELETE
, 0, 0, e
);
448 if (n
!= 0 && _kevent(eloop
->fd
, ke
, n
, NULL
, 0, NULL
) == -1)
450 #elif defined(HAVE_EPOLL)
451 if (epoll_ctl(eloop
->fd
, EPOLL_CTL_DEL
, fd
, NULL
) == -1)
456 eloop
->events_need_setup
= true;
461 eloop_timespec_diff(const struct timespec
*tsp
, const struct timespec
*usp
,
464 unsigned long long tsecs
, usecs
, secs
;
467 if (tsp
->tv_sec
< 0) /* time wreapped */
468 tsecs
= UTIME_MAX
- (unsigned long long)(-tsp
->tv_sec
);
470 tsecs
= (unsigned long long)tsp
->tv_sec
;
471 if (usp
->tv_sec
< 0) /* time wrapped */
472 usecs
= UTIME_MAX
- (unsigned long long)(-usp
->tv_sec
);
474 usecs
= (unsigned long long)usp
->tv_sec
;
476 if (usecs
> tsecs
) /* time wrapped */
477 secs
= (UTIME_MAX
- usecs
) + tsecs
;
479 secs
= tsecs
- usecs
;
481 nsecs
= tsp
->tv_nsec
- usp
->tv_nsec
;
487 nsecs
+= NSEC_PER_SEC
;
491 *nsp
= (unsigned int)nsecs
;
496 eloop_reduce_timers(struct eloop
*eloop
)
499 unsigned long long secs
;
501 struct eloop_timeout
*t
;
503 clock_gettime(CLOCK_MONOTONIC
, &now
);
504 secs
= eloop_timespec_diff(&now
, &eloop
->now
, &nsecs
);
506 TAILQ_FOREACH(t
, &eloop
->timeouts
, next
) {
507 if (secs
> t
->seconds
) {
511 t
->seconds
-= (unsigned int)secs
;
512 if (nsecs
> t
->nseconds
) {
517 t
->nseconds
= NSEC_PER_SEC
518 - (nsecs
- t
->nseconds
);
521 t
->nseconds
-= nsecs
;
529 * This implementation should cope with UINT_MAX seconds on a system
530 * where time_t is INT32_MAX. It should also cope with the monotonic timer
531 * wrapping, although this is highly unlikely.
532 * unsigned int should match or be greater than any on wire specified timeout.
535 eloop_q_timeout_add(struct eloop
*eloop
, int queue
,
536 unsigned int seconds
, unsigned int nseconds
,
537 void (*callback
)(void *), void *arg
)
539 struct eloop_timeout
*t
, *tt
= NULL
;
541 assert(eloop
!= NULL
);
542 assert(callback
!= NULL
);
543 assert(nseconds
<= NSEC_PER_SEC
);
545 /* Remove existing timeout if present. */
546 TAILQ_FOREACH(t
, &eloop
->timeouts
, next
) {
547 if (t
->callback
== callback
&& t
->arg
== arg
) {
548 TAILQ_REMOVE(&eloop
->timeouts
, t
, next
);
554 /* No existing, so allocate or grab one from the free pool. */
555 if ((t
= TAILQ_FIRST(&eloop
->free_timeouts
))) {
556 TAILQ_REMOVE(&eloop
->free_timeouts
, t
, next
);
558 if ((t
= malloc(sizeof(*t
))) == NULL
)
563 eloop_reduce_timers(eloop
);
565 t
->seconds
= seconds
;
566 t
->nseconds
= nseconds
;
567 t
->callback
= callback
;
571 /* The timeout list should be in chronological order,
573 TAILQ_FOREACH(tt
, &eloop
->timeouts
, next
) {
574 if (t
->seconds
< tt
->seconds
||
575 (t
->seconds
== tt
->seconds
&& t
->nseconds
< tt
->nseconds
))
577 TAILQ_INSERT_BEFORE(tt
, t
, next
);
581 TAILQ_INSERT_TAIL(&eloop
->timeouts
, t
, next
);
586 eloop_q_timeout_add_tv(struct eloop
*eloop
, int queue
,
587 const struct timespec
*when
, void (*callback
)(void *), void *arg
)
590 if (when
->tv_sec
< 0 || (unsigned long)when
->tv_sec
> UINT_MAX
) {
594 if (when
->tv_nsec
< 0 || when
->tv_nsec
> NSEC_PER_SEC
) {
599 return eloop_q_timeout_add(eloop
, queue
,
600 (unsigned int)when
->tv_sec
, (unsigned int)when
->tv_sec
,
/* Convenience wrapper: queue a timeout of whole seconds (no nanoseconds). */
int
eloop_q_timeout_add_sec(struct eloop *eloop, int queue, unsigned int seconds,
    void (*callback)(void *), void *arg)
{

	return eloop_q_timeout_add(eloop, queue, seconds, 0, callback, arg);
}
613 eloop_q_timeout_add_msec(struct eloop
*eloop
, int queue
, unsigned long when
,
614 void (*callback
)(void *), void *arg
)
616 unsigned long seconds
, nseconds
;
618 seconds
= when
/ MSEC_PER_SEC
;
619 if (seconds
> UINT_MAX
) {
624 nseconds
= (when
% MSEC_PER_SEC
) * NSEC_PER_MSEC
;
625 return eloop_q_timeout_add(eloop
, queue
,
626 (unsigned int)seconds
, (unsigned int)nseconds
, callback
, arg
);
630 eloop_q_timeout_delete(struct eloop
*eloop
, int queue
,
631 void (*callback
)(void *), void *arg
)
633 struct eloop_timeout
*t
, *tt
;
636 assert(eloop
!= NULL
);
639 TAILQ_FOREACH_SAFE(t
, &eloop
->timeouts
, next
, tt
) {
640 if ((queue
== 0 || t
->queue
== queue
) &&
642 (!callback
|| t
->callback
== callback
))
644 TAILQ_REMOVE(&eloop
->timeouts
, t
, next
);
645 TAILQ_INSERT_TAIL(&eloop
->free_timeouts
, t
, next
);
653 eloop_exit(struct eloop
*eloop
, int code
)
656 assert(eloop
!= NULL
);
658 eloop
->exitcode
= code
;
659 eloop
->exitnow
= true;
663 eloop_enter(struct eloop
*eloop
)
666 assert(eloop
!= NULL
);
668 eloop
->exitnow
= false;
671 /* Must be called after fork(2) */
673 eloop_forked(struct eloop
*eloop
)
675 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
676 struct eloop_event
*e
;
677 #if defined(HAVE_KQUEUE)
678 struct kevent
*pfds
, *pfd
;
680 #elif defined(HAVE_EPOLL)
681 struct epoll_event epe
= { .events
= 0 };
684 assert(eloop
!= NULL
);
685 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
688 if (eloop_open(eloop
) == -1)
693 pfds
= malloc((eloop
->nsignals
+ (eloop
->nevents
* NFD
)) * sizeof(*pfds
));
696 if (eloop
->signal_cb
!= NULL
) {
697 for (i
= 0; i
< eloop
->nsignals
; i
++) {
698 EV_SET(pfd
++, (uintptr_t)eloop
->signals
[i
],
699 EVFILT_SIGNAL
, EV_ADD
, 0, 0, NULL
);
705 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
708 #if defined(HAVE_KQUEUE)
709 if (e
->events
& ELE_READ
) {
710 EV_SET(pfd
++, (uintptr_t)e
->fd
,
711 EVFILT_READ
, EV_ADD
, 0, 0, e
);
714 if (e
->events
& ELE_WRITE
) {
715 EV_SET(pfd
++, (uintptr_t)e
->fd
,
716 EVFILT_WRITE
, EV_ADD
, 0, 0, e
);
719 #elif defined(HAVE_EPOLL)
720 memset(&epe
, 0, sizeof(epe
));
722 if (e
->events
& ELE_READ
)
723 epe
.events
|= EPOLLIN
;
724 if (e
->events
& ELE_WRITE
)
725 epe
.events
|= EPOLLOUT
;
726 if (epoll_ctl(eloop
->fd
, EPOLL_CTL_ADD
, e
->fd
, &epe
) == -1)
731 #if defined(HAVE_KQUEUE)
734 return _kevent(eloop
->fd
, pfds
, i
, NULL
, 0, NULL
);
745 eloop_open(struct eloop
*eloop
)
747 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
750 assert(eloop
!= NULL
);
751 #if defined(HAVE_KQUEUE1)
752 fd
= kqueue1(O_CLOEXEC
);
753 #elif defined(HAVE_KQUEUE)
757 flags
= fcntl(fd
, F_GETFD
, 0);
758 if (!(flags
!= -1 && !(flags
& FD_CLOEXEC
) &&
759 fcntl(fd
, F_SETFD
, flags
| FD_CLOEXEC
) == 0))
764 #elif defined(HAVE_EPOLL)
765 fd
= epoll_create1(EPOLL_CLOEXEC
);
777 eloop_signal_set_cb(struct eloop
*eloop
,
778 const int *signals
, size_t nsignals
,
779 void (*signal_cb
)(int, void *), void *signal_cb_ctx
)
783 struct kevent
*ke
, *kes
;
787 assert(eloop
!= NULL
);
790 ke
= kes
= malloc(MAX(eloop
->nsignals
, nsignals
) * sizeof(*kes
));
793 for (i
= 0; i
< eloop
->nsignals
; i
++) {
794 EV_SET(ke
++, (uintptr_t)eloop
->signals
[i
],
795 EVFILT_SIGNAL
, EV_DELETE
, 0, 0, NULL
);
797 if (i
!= 0 && _kevent(eloop
->fd
, kes
, i
, NULL
, 0, NULL
) == -1) {
803 eloop
->signals
= signals
;
804 eloop
->nsignals
= nsignals
;
805 eloop
->signal_cb
= signal_cb
;
806 eloop
->signal_cb_ctx
= signal_cb_ctx
;
809 if (signal_cb
== NULL
)
812 for (i
= 0; i
< eloop
->nsignals
; i
++) {
813 EV_SET(ke
++, (uintptr_t)eloop
->signals
[i
],
814 EVFILT_SIGNAL
, EV_ADD
, 0, 0, NULL
);
816 if (i
!= 0 && _kevent(eloop
->fd
, kes
, i
, NULL
, 0, NULL
) == -1)
826 static volatile int _eloop_sig
[ELOOP_NSIGNALS
];
827 static volatile size_t _eloop_nsig
;
830 eloop_signal3(int sig
, __unused siginfo_t
*siginfo
, __unused
void *arg
)
833 if (_eloop_nsig
== __arraycount(_eloop_sig
)) {
835 fprintf(stderr
, "%s: signal storm, discarding signal %d\n",
841 _eloop_sig
[_eloop_nsig
++] = sig
;
846 eloop_signal_mask(struct eloop
*eloop
, sigset_t
*oldset
)
851 struct sigaction sa
= {
852 .sa_sigaction
= eloop_signal3
,
853 .sa_flags
= SA_SIGINFO
,
857 assert(eloop
!= NULL
);
859 sigemptyset(&newset
);
860 for (i
= 0; i
< eloop
->nsignals
; i
++)
861 sigaddset(&newset
, eloop
->signals
[i
]);
862 if (sigprocmask(SIG_SETMASK
, &newset
, oldset
) == -1)
866 sigemptyset(&sa
.sa_mask
);
868 for (i
= 0; i
< eloop
->nsignals
; i
++) {
869 if (sigaction(eloop
->signals
[i
], &sa
, NULL
) == -1)
882 eloop
= calloc(1, sizeof(*eloop
));
886 /* Check we have a working monotonic clock. */
887 if (clock_gettime(CLOCK_MONOTONIC
, &eloop
->now
) == -1) {
892 TAILQ_INIT(&eloop
->events
);
893 TAILQ_INIT(&eloop
->free_events
);
894 TAILQ_INIT(&eloop
->timeouts
);
895 TAILQ_INIT(&eloop
->free_timeouts
);
896 eloop
->exitcode
= EXIT_FAILURE
;
898 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
899 if (eloop_open(eloop
) == -1) {
909 eloop_clear(struct eloop
*eloop
, ...)
913 struct eloop_event
*e
, *ne
;
914 struct eloop_timeout
*t
;
919 va_start(va1
, eloop
);
920 TAILQ_FOREACH_SAFE(e
, &eloop
->events
, next
, ne
) {
923 except_fd
= va_arg(va2
, int);
924 while (except_fd
!= -1 && except_fd
!= e
->fd
);
926 if (e
->fd
== except_fd
&& e
->fd
!= -1)
928 TAILQ_REMOVE(&eloop
->events
, e
, next
);
937 #if !defined(HAVE_PSELECT)
938 /* Free the pollfd buffer and ensure it's re-created before
939 * the next run. This allows us to shrink it incase we use a lot less
940 * signals and fds to respond to after forking. */
944 eloop
->events_need_setup
= true;
947 while ((e
= TAILQ_FIRST(&eloop
->free_events
))) {
948 TAILQ_REMOVE(&eloop
->free_events
, e
, next
);
951 while ((t
= TAILQ_FIRST(&eloop
->timeouts
))) {
952 TAILQ_REMOVE(&eloop
->timeouts
, t
, next
);
955 while ((t
= TAILQ_FIRST(&eloop
->free_timeouts
))) {
956 TAILQ_REMOVE(&eloop
->free_timeouts
, t
, next
);
959 eloop
->cleared
= true;
/* Destroy an event loop: release its events and timeouts and, where
 * kqueue or epoll is in use, close the polling descriptor.
 * NOTE(review): assumes eloop_clear tolerates a NULL eloop — confirm. */
void
eloop_free(struct eloop *eloop)
{

	eloop_clear(eloop, -1);
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
	if (eloop != NULL && eloop->fd != -1)
		close(eloop->fd);
#endif
	free(eloop);
}
974 #if defined(HAVE_KQUEUE)
976 eloop_run_kqueue(struct eloop
*eloop
, const struct timespec
*ts
)
980 struct eloop_event
*e
;
981 unsigned short events
;
983 n
= _kevent(eloop
->fd
, NULL
, 0, eloop
->fds
, eloop
->nevents
, ts
);
987 for (nn
= n
, ke
= eloop
->fds
; nn
!= 0; nn
--, ke
++) {
988 if (eloop
->cleared
|| eloop
->exitnow
)
990 e
= (struct eloop_event
*)ke
->udata
;
991 if (ke
->filter
== EVFILT_SIGNAL
) {
992 eloop
->signal_cb((int)ke
->ident
,
993 eloop
->signal_cb_ctx
);
996 if (ke
->filter
== EVFILT_READ
)
998 else if (ke
->filter
== EVFILT_WRITE
)
1000 #ifdef EVFILT_PROCDESC
1001 else if (ke
->filter
== EVFILT_PROCDESC
&&
1002 ke
->fflags
& NOTE_EXIT
)
1003 /* exit status is in ke->data.
1004 * As we default to using ppoll anyway
1005 * we don't have to do anything with it right now. */
1006 events
= ELE_HANGUP
;
1009 continue; /* assert? */
1010 if (ke
->flags
& EV_EOF
)
1011 events
|= ELE_HANGUP
;
1012 if (ke
->flags
& EV_ERROR
)
1013 events
|= ELE_ERROR
;
1014 e
->cb(e
->cb_arg
, events
);
1019 #elif defined(HAVE_EPOLL)
1022 eloop_run_epoll(struct eloop
*eloop
,
1023 const struct timespec
*ts
, const sigset_t
*signals
)
1026 struct epoll_event
*epe
;
1027 struct eloop_event
*e
;
1028 unsigned short events
;
1031 if (ts
->tv_sec
> INT_MAX
/ 1000 ||
1032 (ts
->tv_sec
== INT_MAX
/ 1000 &&
1033 ((ts
->tv_nsec
+ 999999) / 1000000 > INT_MAX
% 1000000)))
1036 timeout
= (int)(ts
->tv_sec
* 1000 +
1037 (ts
->tv_nsec
+ 999999) / 1000000);
1041 if (signals
!= NULL
)
1042 n
= epoll_pwait(eloop
->fd
, eloop
->fds
,
1043 (int)eloop
->nevents
, timeout
, signals
);
1045 n
= epoll_wait(eloop
->fd
, eloop
->fds
,
1046 (int)eloop
->nevents
, timeout
);
1050 for (nn
= n
, epe
= eloop
->fds
; nn
!= 0; nn
--, epe
++) {
1051 if (eloop
->cleared
|| eloop
->exitnow
)
1053 e
= (struct eloop_event
*)epe
->data
.ptr
;
1057 if (epe
->events
& EPOLLIN
)
1059 if (epe
->events
& EPOLLOUT
)
1060 events
|= ELE_WRITE
;
1061 if (epe
->events
& EPOLLHUP
)
1062 events
|= ELE_HANGUP
;
1063 if (epe
->events
& EPOLLERR
)
1064 events
|= ELE_ERROR
;
1065 e
->cb(e
->cb_arg
, events
);
1070 #elif defined(HAVE_PPOLL)
1073 eloop_run_ppoll(struct eloop
*eloop
,
1074 const struct timespec
*ts
, const sigset_t
*signals
)
1077 struct eloop_event
*e
;
1079 unsigned short events
;
1081 n
= ppoll(eloop
->fds
, (nfds_t
)eloop
->nevents
, ts
, signals
);
1082 if (n
== -1 || n
== 0)
1086 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
1087 if (eloop
->cleared
|| eloop
->exitnow
)
1089 /* Skip freshly added events */
1090 if ((pfd
= e
->pollfd
) == NULL
)
1092 if (e
->pollfd
->revents
) {
1095 if (pfd
->revents
& POLLIN
)
1097 if (pfd
->revents
& POLLOUT
)
1098 events
|= ELE_WRITE
;
1099 if (pfd
->revents
& POLLHUP
)
1100 events
|= ELE_HANGUP
;
1101 if (pfd
->revents
& POLLERR
)
1102 events
|= ELE_ERROR
;
1103 if (pfd
->revents
& POLLNVAL
)
1106 e
->cb(e
->cb_arg
, events
);
1114 #elif defined(HAVE_PSELECT)
1117 eloop_run_pselect(struct eloop
*eloop
,
1118 const struct timespec
*ts
, const sigset_t
*sigmask
)
1120 fd_set read_fds
, write_fds
;
1122 struct eloop_event
*e
;
1123 unsigned short events
;
1126 FD_ZERO(&write_fds
);
1128 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
1131 if (e
->events
& ELE_READ
) {
1132 FD_SET(e
->fd
, &read_fds
);
1136 if (e
->events
& ELE_WRITE
) {
1137 FD_SET(e
->fd
, &write_fds
);
1143 /* except_fd's is for STREAMS devices which we don't use. */
1144 n
= pselect(maxfd
+ 1, &read_fds
, &write_fds
, NULL
, ts
, sigmask
);
1145 if (n
== -1 || n
== 0)
1148 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
1149 if (eloop
->cleared
|| eloop
->exitnow
)
1154 if (FD_ISSET(e
->fd
, &read_fds
))
1156 if (FD_ISSET(e
->fd
, &write_fds
))
1157 events
|= ELE_WRITE
;
1159 e
->cb(e
->cb_arg
, events
);
1167 eloop_start(struct eloop
*eloop
, sigset_t
*signals
)
1170 struct eloop_timeout
*t
;
1171 struct timespec ts
, *tsp
;
1173 assert(eloop
!= NULL
);
1183 if (_eloop_nsig
!= 0) {
1184 int n
= _eloop_sig
[--_eloop_nsig
];
1186 if (eloop
->signal_cb
!= NULL
)
1187 eloop
->signal_cb(n
, eloop
->signal_cb_ctx
);
1192 t
= TAILQ_FIRST(&eloop
->timeouts
);
1193 if (t
== NULL
&& eloop
->nevents
== 0)
1197 eloop_reduce_timers(eloop
);
1199 if (t
!= NULL
&& t
->seconds
== 0 && t
->nseconds
== 0) {
1200 TAILQ_REMOVE(&eloop
->timeouts
, t
, next
);
1201 t
->callback(t
->arg
);
1202 TAILQ_INSERT_TAIL(&eloop
->free_timeouts
, t
, next
);
1207 if (t
->seconds
> INT_MAX
) {
1208 ts
.tv_sec
= (time_t)INT_MAX
;
1211 ts
.tv_sec
= (time_t)t
->seconds
;
1212 ts
.tv_nsec
= (long)t
->nseconds
;
1218 eloop
->cleared
= false;
1219 if (eloop
->events_need_setup
)
1220 eloop_event_setup_fds(eloop
);
1222 #if defined(HAVE_KQUEUE)
1224 error
= eloop_run_kqueue(eloop
, tsp
);
1225 #elif defined(HAVE_EPOLL)
1226 error
= eloop_run_epoll(eloop
, tsp
, signals
);
1227 #elif defined(HAVE_PPOLL)
1228 error
= eloop_run_ppoll(eloop
, tsp
, signals
);
1229 #elif defined(HAVE_PSELECT)
1230 error
= eloop_run_pselect(eloop
, tsp
, signals
);
1232 #error no polling mechanism to run!
1241 return eloop
->exitcode
;