1 /* SPDX-License-Identifier: BSD-2-Clause */
3 * eloop - portable event based main loop.
4 * Copyright (c) 2006-2020 Roy Marples <roy@marples.name>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 /* config.h should define HAVE_PPOLL, etc. */
42 #if defined(HAVE_CONFIG_H) && !defined(NO_CONFIG_H)
46 #if defined(HAVE_PPOLL)
47 #elif defined(HAVE_POLLTS)
49 #elif !defined(HAVE_PSELECT)
50 #pragma message("Compiling eloop with pselect(2) support.")
52 #define ppoll eloop_ppoll
58 #define UNUSED(a) (void)((a))
62 #define __unused __attribute__((__unused__))
69 #include <sys/select.h>
72 /* Our structures require TAILQ macros, which really every libc should
73 * ship as they are useful beyond belief.
74 * Sadly some libc's don't have sys/queue.h and some that do don't have
75 * the TAILQ_FOREACH macro. For those that don't, the application using
76 * this implementation will need to ship a working queue.h somewhere.
77 * If we don't have sys/queue.h found in config.h, then
78 * allow QUEUE_H to override loading queue.h in the current directory. */
80 #ifdef HAVE_SYS_QUEUE_H
81 #include <sys/queue.h>
82 #elif defined(QUEUE_H)
83 #define __QUEUE_HEADER(x) #x
84 #define _QUEUE_HEADER(x) __QUEUE_HEADER(x)
85 #include _QUEUE_HEADER(QUEUE_H)
/*
 * Allow a backlog of signals.
 * If you use many eloops in the same process, they should all
 * use the same signal handler or have the signal handler unset.
 * Otherwise the signal might not behave as expected.
 */
#define ELOOP_NSIGNALS 5

/*
 * time_t is a signed integer of an unspecified size.
 * To adjust for time_t wrapping, we need to work out the maximum signed
 * value and use that as a maximum.
 */
#ifndef NBBY
#define NBBY 8	/* bits per byte; not every libc defines this */
#endif
#define TIME_MAX ((1ULL << (sizeof(time_t) * NBBY - 1)) - 1)

/* The unsigned maximum is then simple - multiply by two and add one.
 * Fully parenthesised: the previous definition expanded to
 * `(TIME_MAX * 2) + 1`, which mis-associates when the macro is used
 * inside a larger expression (e.g. `x * UTIME_MAX`). */
#define UTIME_MAX ((TIME_MAX * 2) + 1)
117 TAILQ_ENTRY(eloop_event
) next
;
119 void (*read_cb
)(void *);
121 void (*write_cb
)(void *);
123 struct pollfd
*pollfd
;
126 struct eloop_timeout
{
127 TAILQ_ENTRY(eloop_timeout
) next
;
128 unsigned int seconds
;
129 unsigned int nseconds
;
130 void (*callback
)(void *);
136 TAILQ_HEAD (event_head
, eloop_event
) events
;
138 struct event_head free_events
;
141 TAILQ_HEAD (timeout_head
, eloop_timeout
) timeouts
;
142 struct timeout_head free_timeouts
;
146 void (*signal_cb
)(int, void *);
156 #ifdef HAVE_REALLOCARRAY
157 #define eloop_realloca reallocarray
159 /* Handy routing to check for potential overflow.
160 * reallocarray(3) and reallocarr(3) are not portable. */
161 #define SQRT_SIZE_MAX (((size_t)1) << (sizeof(size_t) * CHAR_BIT / 2))
163 eloop_realloca(void *ptr
, size_t n
, size_t size
)
166 if ((n
| size
) >= SQRT_SIZE_MAX
&& n
> SIZE_MAX
/ size
) {
170 return realloc(ptr
, n
* size
);
/* Wrapper around pselect(2), to imitate the ppoll(2) call for platforms
 * that lack it.  Only POLLIN and POLLOUT are honoured; error conditions
 * are reported via the read set as pselect(2) cannot distinguish them.
 * Returns the number of descriptors with events, 0 on timeout, or -1
 * on error (errno set by pselect).
 * NOTE(review): source extraction was damaged here; the FD_ZERO/maxfd
 * bookkeeping and the r > 0 guard were reconstructed - verify against
 * upstream eloop.c. */
static int
eloop_ppoll(struct pollfd *fds, nfds_t nfds,
    const struct timespec *ts, const sigset_t *sigmask)
{
	fd_set read_fds, write_fds;
	nfds_t n;
	int maxfd, r;

	FD_ZERO(&read_fds);
	FD_ZERO(&write_fds);
	maxfd = 0;
	for (n = 0; n < nfds; n++) {
		if (fds[n].events & POLLIN) {
			FD_SET(fds[n].fd, &read_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
		if (fds[n].events & POLLOUT) {
			FD_SET(fds[n].fd, &write_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
	}

	r = pselect(maxfd + 1, &read_fds, &write_fds, NULL, ts, sigmask);
	if (r > 0) {
		/* Translate the fd sets back into poll-style revents. */
		for (n = 0; n < nfds; n++) {
			fds[n].revents =
			    FD_ISSET(fds[n].fd, &read_fds) ? POLLIN : 0;
			if (FD_ISSET(fds[n].fd, &write_fds))
				fds[n].revents |= POLLOUT;
		}
	}

	return r;
}
215 eloop_timespec_diff(const struct timespec
*tsp
, const struct timespec
*usp
,
218 unsigned long long tsecs
, usecs
, secs
;
221 if (tsp
->tv_sec
< 0) /* time wreapped */
222 tsecs
= UTIME_MAX
- (unsigned long long)(-tsp
->tv_sec
);
224 tsecs
= (unsigned long long)tsp
->tv_sec
;
225 if (usp
->tv_sec
< 0) /* time wrapped */
226 usecs
= UTIME_MAX
- (unsigned long long)(-usp
->tv_sec
);
228 usecs
= (unsigned long long)usp
->tv_sec
;
230 if (usecs
> tsecs
) /* time wrapped */
231 secs
= (UTIME_MAX
- usecs
) + tsecs
;
233 secs
= tsecs
- usecs
;
235 nsecs
= tsp
->tv_nsec
- usp
->tv_nsec
;
241 nsecs
+= NSEC_PER_SEC
;
245 *nsp
= (unsigned int)nsecs
;
250 eloop_reduce_timers(struct eloop
*eloop
)
253 unsigned long long secs
;
255 struct eloop_timeout
*t
;
257 clock_gettime(CLOCK_MONOTONIC
, &now
);
258 secs
= eloop_timespec_diff(&now
, &eloop
->now
, &nsecs
);
260 TAILQ_FOREACH(t
, &eloop
->timeouts
, next
) {
261 if (secs
> t
->seconds
) {
265 t
->seconds
-= (unsigned int)secs
;
266 if (nsecs
> t
->nseconds
) {
271 t
->nseconds
= NSEC_PER_SEC
272 - (nsecs
- t
->nseconds
);
275 t
->nseconds
-= nsecs
;
283 eloop_event_setup_fds(struct eloop
*eloop
)
285 struct eloop_event
*e
;
289 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
291 fprintf(stderr
, "%s(%d) fd=%d, rcb=%p, wcb=%p\n",
292 __func__
, getpid(), e
->fd
, e
->read_cb
, e
->write_cb
);
297 if (e
->read_cb
!= NULL
)
298 pfd
->events
|= POLLIN
;
299 if (e
->write_cb
!= NULL
)
300 pfd
->events
|= POLLOUT
;
307 eloop_event_count(const struct eloop
*eloop
)
310 return eloop
->nevents
;
314 eloop_event_add_rw(struct eloop
*eloop
, int fd
,
315 void (*read_cb
)(void *), void *read_cb_arg
,
316 void (*write_cb
)(void *), void *write_cb_arg
)
318 struct eloop_event
*e
;
321 assert(eloop
!= NULL
);
322 assert(read_cb
!= NULL
|| write_cb
!= NULL
);
328 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
334 if (eloop
->nevents
+ 1 > eloop
->nfds
) {
335 pfd
= eloop_realloca(eloop
->fds
, eloop
->nevents
+ 1,
343 e
= TAILQ_FIRST(&eloop
->free_events
);
345 TAILQ_REMOVE(&eloop
->free_events
, e
, next
);
347 e
= malloc(sizeof(*e
));
351 TAILQ_INSERT_HEAD(&eloop
->events
, e
, next
);
354 e
->read_cb
= read_cb
;
355 e
->read_cb_arg
= read_cb_arg
;
356 e
->write_cb
= write_cb
;
357 e
->write_cb_arg
= write_cb_arg
;
362 e
->read_cb
= read_cb
;
363 e
->read_cb_arg
= read_cb_arg
;
366 e
->write_cb
= write_cb
;
367 e
->write_cb_arg
= write_cb_arg
;
371 eloop_event_setup_fds(eloop
);
376 eloop_event_add(struct eloop
*eloop
, int fd
,
377 void (*read_cb
)(void *), void *read_cb_arg
)
380 return eloop_event_add_rw(eloop
, fd
, read_cb
, read_cb_arg
, NULL
, NULL
);
384 eloop_event_add_w(struct eloop
*eloop
, int fd
,
385 void (*write_cb
)(void *), void *write_cb_arg
)
388 return eloop_event_add_rw(eloop
, fd
, NULL
,NULL
, write_cb
, write_cb_arg
);
392 eloop_event_delete_write(struct eloop
*eloop
, int fd
, int write_only
)
394 struct eloop_event
*e
;
396 assert(eloop
!= NULL
);
398 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
408 if (e
->read_cb
== NULL
)
411 e
->write_cb_arg
= NULL
;
416 TAILQ_REMOVE(&eloop
->events
, e
, next
);
417 TAILQ_INSERT_TAIL(&eloop
->free_events
, e
, next
);
421 eloop_event_setup_fds(eloop
);
426 * This implementation should cope with UINT_MAX seconds on a system
427 * where time_t is INT32_MAX. It should also cope with the monotonic timer
428 * wrapping, although this is highly unlikely.
429 * unsigned int should match or be greater than any on wire specified timeout.
432 eloop_q_timeout_add(struct eloop
*eloop
, int queue
,
433 unsigned int seconds
, unsigned int nseconds
,
434 void (*callback
)(void *), void *arg
)
436 struct eloop_timeout
*t
, *tt
= NULL
;
438 assert(eloop
!= NULL
);
439 assert(callback
!= NULL
);
440 assert(nseconds
<= NSEC_PER_SEC
);
442 /* Remove existing timeout if present. */
443 TAILQ_FOREACH(t
, &eloop
->timeouts
, next
) {
444 if (t
->callback
== callback
&& t
->arg
== arg
) {
445 TAILQ_REMOVE(&eloop
->timeouts
, t
, next
);
451 /* No existing, so allocate or grab one from the free pool. */
452 if ((t
= TAILQ_FIRST(&eloop
->free_timeouts
))) {
453 TAILQ_REMOVE(&eloop
->free_timeouts
, t
, next
);
455 if ((t
= malloc(sizeof(*t
))) == NULL
)
460 eloop_reduce_timers(eloop
);
462 t
->seconds
= seconds
;
463 t
->nseconds
= nseconds
;
464 t
->callback
= callback
;
468 /* The timeout list should be in chronological order,
470 TAILQ_FOREACH(tt
, &eloop
->timeouts
, next
) {
471 if (t
->seconds
< tt
->seconds
||
472 (t
->seconds
== tt
->seconds
&& t
->nseconds
< tt
->nseconds
))
474 TAILQ_INSERT_BEFORE(tt
, t
, next
);
478 TAILQ_INSERT_TAIL(&eloop
->timeouts
, t
, next
);
483 eloop_q_timeout_add_tv(struct eloop
*eloop
, int queue
,
484 const struct timespec
*when
, void (*callback
)(void *), void *arg
)
487 if (when
->tv_sec
< 0 || (unsigned long)when
->tv_sec
> UINT_MAX
) {
491 if (when
->tv_nsec
< 0 || when
->tv_nsec
> NSEC_PER_SEC
) {
496 return eloop_q_timeout_add(eloop
, queue
,
497 (unsigned int)when
->tv_sec
, (unsigned int)when
->tv_sec
,
/* Add a whole-second timeout; see eloop_q_timeout_add. */
int
eloop_q_timeout_add_sec(struct eloop *eloop, int queue, unsigned int seconds,
    void (*callback)(void *), void *arg)
{

	return eloop_q_timeout_add(eloop, queue, seconds, 0, callback, arg);
}
510 eloop_q_timeout_add_msec(struct eloop
*eloop
, int queue
, unsigned long when
,
511 void (*callback
)(void *), void *arg
)
513 unsigned long seconds
, nseconds
;
515 seconds
= when
/ MSEC_PER_SEC
;
516 if (seconds
> UINT_MAX
) {
521 nseconds
= (when
% MSEC_PER_SEC
) * NSEC_PER_MSEC
;
522 return eloop_q_timeout_add(eloop
, queue
,
523 (unsigned int)seconds
, (unsigned int)nseconds
, callback
, arg
);
527 eloop_q_timeout_delete(struct eloop
*eloop
, int queue
,
528 void (*callback
)(void *), void *arg
)
530 struct eloop_timeout
*t
, *tt
;
533 assert(eloop
!= NULL
);
536 TAILQ_FOREACH_SAFE(t
, &eloop
->timeouts
, next
, tt
) {
537 if ((queue
== 0 || t
->queue
== queue
) &&
539 (!callback
|| t
->callback
== callback
))
541 TAILQ_REMOVE(&eloop
->timeouts
, t
, next
);
542 TAILQ_INSERT_TAIL(&eloop
->free_timeouts
, t
, next
);
550 eloop_exit(struct eloop
*eloop
, int code
)
553 assert(eloop
!= NULL
);
555 eloop
->exitcode
= code
;
560 eloop_enter(struct eloop
*eloop
)
567 eloop_signal_set_cb(struct eloop
*eloop
,
568 const int *signals
, size_t signals_len
,
569 void (*signal_cb
)(int, void *), void *signal_cb_ctx
)
572 assert(eloop
!= NULL
);
574 eloop
->signals
= signals
;
575 eloop
->signals_len
= signals_len
;
576 eloop
->signal_cb
= signal_cb
;
577 eloop
->signal_cb_ctx
= signal_cb_ctx
;
/* Process-wide backlog of signals caught by eloop_signal3(), drained by
 * eloop_start().  Shared by every eloop in the process - hence the
 * warning at ELOOP_NSIGNALS about using one common signal handler.
 * NOTE(review): written from a signal handler but typed int/size_t
 * rather than sig_atomic_t, and the leading-underscore names are in
 * the implementation-reserved namespace - confirm intent upstream. */
static volatile int _eloop_sig[ELOOP_NSIGNALS];
static volatile size_t _eloop_nsig;
584 eloop_signal3(int sig
, __unused siginfo_t
*siginfo
, __unused
void *arg
)
587 if (_eloop_nsig
== __arraycount(_eloop_sig
)) {
589 fprintf(stderr
, "%s: signal storm, discarding signal %d\n",
595 _eloop_sig
[_eloop_nsig
++] = sig
;
599 eloop_signal_mask(struct eloop
*eloop
, sigset_t
*oldset
)
603 struct sigaction sa
= {
604 .sa_sigaction
= eloop_signal3
,
605 .sa_flags
= SA_SIGINFO
,
608 assert(eloop
!= NULL
);
610 sigemptyset(&newset
);
611 for (i
= 0; i
< eloop
->signals_len
; i
++)
612 sigaddset(&newset
, eloop
->signals
[i
]);
613 if (sigprocmask(SIG_SETMASK
, &newset
, oldset
) == -1)
616 sigemptyset(&sa
.sa_mask
);
618 for (i
= 0; i
< eloop
->signals_len
; i
++) {
619 if (sigaction(eloop
->signals
[i
], &sa
, NULL
) == -1)
630 eloop
= calloc(1, sizeof(*eloop
));
634 /* Check we have a working monotonic clock. */
635 if (clock_gettime(CLOCK_MONOTONIC
, &eloop
->now
) == -1) {
640 TAILQ_INIT(&eloop
->events
);
641 TAILQ_INIT(&eloop
->free_events
);
642 TAILQ_INIT(&eloop
->timeouts
);
643 TAILQ_INIT(&eloop
->free_timeouts
);
644 eloop
->exitcode
= EXIT_FAILURE
;
650 eloop_clear(struct eloop
*eloop
)
652 struct eloop_event
*e
;
653 struct eloop_timeout
*t
;
659 eloop
->signals
= NULL
;
660 eloop
->signals_len
= 0;
662 while ((e
= TAILQ_FIRST(&eloop
->events
))) {
663 TAILQ_REMOVE(&eloop
->events
, e
, next
);
666 while ((e
= TAILQ_FIRST(&eloop
->free_events
))) {
667 TAILQ_REMOVE(&eloop
->free_events
, e
, next
);
670 while ((t
= TAILQ_FIRST(&eloop
->timeouts
))) {
671 TAILQ_REMOVE(&eloop
->timeouts
, t
, next
);
674 while ((t
= TAILQ_FIRST(&eloop
->free_timeouts
))) {
675 TAILQ_REMOVE(&eloop
->free_timeouts
, t
, next
);
/* Clear and free the loop; safe to call with NULL (free(NULL) is a
 * no-op and eloop_clear guards NULL itself).
 * NOTE(review): only the signature survived the damaged extraction;
 * body reconstructed per upstream eloop.c - verify. */
void
eloop_free(struct eloop *eloop)
{

	eloop_clear(eloop);
	free(eloop);
}
693 eloop_start(struct eloop
*eloop
, sigset_t
*signals
)
696 struct eloop_event
*e
;
697 struct eloop_timeout
*t
;
698 struct timespec ts
, *tsp
;
700 assert(eloop
!= NULL
);
706 if (_eloop_nsig
!= 0) {
707 n
= _eloop_sig
[--_eloop_nsig
];
708 if (eloop
->signal_cb
!= NULL
)
709 eloop
->signal_cb(n
, eloop
->signal_cb_ctx
);
713 t
= TAILQ_FIRST(&eloop
->timeouts
);
714 if (t
== NULL
&& eloop
->nevents
== 0)
718 eloop_reduce_timers(eloop
);
720 if (t
!= NULL
&& t
->seconds
== 0 && t
->nseconds
== 0) {
721 TAILQ_REMOVE(&eloop
->timeouts
, t
, next
);
723 TAILQ_INSERT_TAIL(&eloop
->free_timeouts
, t
, next
);
728 if (t
->seconds
> INT_MAX
) {
729 ts
.tv_sec
= (time_t)INT_MAX
;
732 ts
.tv_sec
= (time_t)t
->seconds
;
733 ts
.tv_nsec
= (long)t
->nseconds
;
739 n
= ppoll(eloop
->fds
, (nfds_t
)eloop
->nevents
, tsp
, signals
);
748 TAILQ_FOREACH(e
, &eloop
->events
, next
) {
749 if (e
->pollfd
->revents
& POLLOUT
) {
750 if (e
->write_cb
!= NULL
) {
751 e
->write_cb(e
->write_cb_arg
);
755 if (e
->pollfd
->revents
) {
756 if (e
->read_cb
!= NULL
) {
757 e
->read_cb(e
->read_cb_arg
);
764 return eloop
->exitcode
;