/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/types.h>

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
struct uloop_fd_event {
	struct uloop_fd *fd;
	unsigned int events;
};

struct uloop_fd_stack {
	struct uloop_fd_stack *next;
	struct uloop_fd *fd;
	unsigned int events;
};

static struct uloop_fd_stack *fd_stack = NULL;
#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
static bool do_sigchld = false;

static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;
#ifdef USE_KQUEUE

int uloop_init(void)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev = {};

	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	/* Receive SIGCHLD through kqueue so child exits wake up the loop */
	EV_SET(&ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
	kevent(poll_fd, &ev, 1, NULL, 0, &timeout);

	return 0;
}
static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_ONESHOT;

	return kflags;
}

static struct kevent events[ULOOP_MAX_EVENTS];
static int register_kevent(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	int nev = 0;
	unsigned int fl = 0;
	unsigned int changed;
	uint16_t kflags;

	if (flags & ULOOP_EDGE_DEFER)
		flags &= ~ULOOP_EDGE_TRIGGER;

	changed = flags ^ fd->flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	if (!flags)
		fl |= EV_DELETE;

	fd->flags = flags;
	if (kevent(poll_fd, ev, nev, NULL, fl, &timeout) == -1)
		return -1;

	return 0;
}
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	if (flags & ULOOP_EDGE_TRIGGER)
		flags |= ULOOP_EDGE_DEFER;
	else
		flags &= ~ULOOP_EDGE_DEFER;

	return register_kevent(fd, flags);
}

static int __uloop_fd_delete(struct uloop_fd *fd)
{
	return register_poll(fd, 0);
}
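/*
 * Edge-trigger is emulated here with EV_ONESHOT: the fd is first registered
 * level-triggered with ULOOP_EDGE_DEFER set, and uloop_fetch_events() below
 * re-registers it with EV_ONESHOT semantics (see get_flags()) once its first
 * event has been delivered.
 */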
static int uloop_fetch_events(int timeout)
{
	struct timespec ts;
	int nfds, n;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events),
		      timeout >= 0 ? &ts : NULL);
	for (n = 0; n < nfds; n++) {
		struct uloop_fd_event *cur = &cur_fds[n];
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		cur->fd = u;
		if (!u)
			continue;

		if (events[n].flags & EV_ERROR) {
			u->error = true;
			if (!(u->flags & ULOOP_ERROR_CB))
				uloop_fd_delete(u);
		}

		if (events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if (events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			cur->fd = NULL;

		cur->events = ev;
		if (u->flags & ULOOP_EDGE_DEFER) {
			u->flags &= ~ULOOP_EDGE_DEFER;
			u->flags |= ULOOP_EDGE_TRIGGER;
			register_kevent(u, u->flags);
		}
	}
	return nfds;
}
#endif /* USE_KQUEUE */

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	ev.data.ptr = fd;
	fd->flags = flags;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}
static struct epoll_event events[ULOOP_MAX_EVENTS];

static int __uloop_fd_delete(struct uloop_fd *sock)
{
	sock->flags = 0;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}
static int uloop_fetch_events(int timeout)
{
	int n, nfds;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for (n = 0; n < nfds; ++n) {
		struct uloop_fd_event *cur = &cur_fds[n];
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		cur->fd = u;
		if (!u)
			continue;

		if (events[n].events & (EPOLLERR|EPOLLHUP)) {
			u->error = true;
			if (!(u->flags & ULOOP_ERROR_CB))
				uloop_fd_delete(u);
		}

		if (!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP))) {
			cur->fd = NULL;
			continue;
		}

		if (events[n].events & EPOLLRDHUP)
			u->eof = true;

		if (events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if (events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		cur->events = ev;
	}

	return nfds;
}

#endif /* USE_EPOLL */
static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
{
	struct uloop_fd_stack *cur;

	/*
	 * Do not buffer events for level-triggered fds, they will keep firing.
	 * Caller needs to take care of recursion issues.
	 */
	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
		return false;

	for (cur = fd_stack; cur; cur = cur->next) {
		if (cur->fd != fd)
			continue;

		if (events < 0)
			cur->fd = NULL;
		else
			cur->events |= events | ULOOP_EVENT_BUFFERED;

		return true;
	}

	return false;
}
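/*
 * The stack above exists so a callback can safely modify the fd it is being
 * invoked for: if a callback deletes its own fd, uloop_fd_delete() calls
 * uloop_fd_stack_event(fd, -1), which clears stack_cur.fd and stops the
 * dispatch loop in uloop_run_events() below from re-invoking the stale
 * callback; buffered events are replayed for fds that stay registered.
 */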
static void uloop_run_events(int timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
		if (cur_nfds < 0)
			cur_nfds = 0;
	}

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		if (!fd)
			continue;

		if (!fd->cb)
			continue;

		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;
	}
}
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
		return uloop_fd_delete(sock);

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;

out:
	return ret;
}
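/*
 * Usage sketch (hypothetical example, not part of this file): registering a
 * connected socket for read events. uloop_fd_add() switches the descriptor
 * to non-blocking mode unless ULOOP_BLOCKING is passed, so the callback must
 * tolerate short reads and EAGAIN.
 *
 *	static void echo_cb(struct uloop_fd *u, unsigned int events)
 *	{
 *		char buf[256];
 *		ssize_t len;
 *
 *		len = read(u->fd, buf, sizeof(buf));
 *		if (len == 0 || (len < 0 && errno != EAGAIN))
 *			uloop_fd_delete(u);	// peer closed or hard error
 *	}
 *
 *	static struct uloop_fd echo_fd = { .cb = echo_cb };
 *
 *	echo_fd.fd = sock;	// "sock" is an assumed connected socket
 *	uloop_fd_add(&echo_fd, ULOOP_READ);
 */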
int uloop_fd_delete(struct uloop_fd *fd)
{
	int i;

	for (i = 0; i < cur_nfds; i++) {
		if (cur_fds[cur_fd + i].fd != fd)
			continue;

		cur_fds[cur_fd + i].fd = NULL;
	}

	if (!fd->registered)
		return 0;

	fd->registered = false;
	uloop_fd_stack_event(fd, -1);
	return __uloop_fd_delete(fd);
}
static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}
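/*
 * Example: t1 = { .tv_sec = 5, .tv_usec = 250000 } and
 * t2 = { .tv_sec = 4, .tv_usec = 750000 } give
 * (5 - 4) * 1000 + (250000 - 750000) / 1000 = 1000 - 500 = 500 ms.
 */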
int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	/* Keep the list sorted by expiry: insert before the first entry
	 * that expires later than the new one */
	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}
static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}
int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(&timeout->time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec > 1000000) {
		time->tv_sec++;
		time->tv_usec %= 1000000;
	}

	return uloop_timeout_add(timeout);
}
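/*
 * Usage sketch (hypothetical example): timeouts are one-shot, so a periodic
 * timer is built by re-arming from inside the callback.
 *
 *	static void tick_cb(struct uloop_timeout *t)
 *	{
 *		do_periodic_work();		// assumed application helper
 *		uloop_timeout_set(t, 1000);	// re-arm: fire again in 1 s
 *	}
 *
 *	static struct uloop_timeout tick = { .cb = tick_cb };
 *
 *	uloop_timeout_set(&tick, 1000);
 */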
int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}
int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}
int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	/* Keep the list sorted by pid for the lookup in uloop_handle_processes() */
	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}
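/*
 * Usage sketch (hypothetical example): tracking a forked child. The exit
 * status is delivered once SIGCHLD causes uloop_handle_processes() to run
 * from the main loop.
 *
 *	static void child_cb(struct uloop_process *p, int ret)
 *	{
 *		printf("pid %d exited, status %d\n", (int)p->pid, ret);
 *	}
 *
 *	static struct uloop_process child = { .cb = child_cb };
 *
 *	child.pid = fork();
 *	if (child.pid == 0) {
 *		execl("/bin/true", "true", NULL);	// assumed child program
 *		_exit(127);
 *	}
 *	if (child.pid > 0)
 *		uloop_process_add(&child);
 */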
int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}
static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}
static void uloop_install_handler(int signum, void (*handler)(int), struct sigaction *old, bool add)
{
	struct sigaction s;
	struct sigaction *act;

	act = NULL;
	sigaction(signum, NULL, &s);

	if (add) {
		if (s.sa_handler == SIG_DFL) { /* Do not override existing custom signal handlers */
			memcpy(old, &s, sizeof(struct sigaction));
			s.sa_handler = handler;
			s.sa_flags = 0;
			act = &s;
		}
	}
	else if (s.sa_handler == handler) { /* Do not restore if someone modified our handler */
		act = old;
	}

	if (act != NULL)
		sigaction(signum, act, NULL);
}
static void uloop_setup_signals(bool add)
{
	static struct sigaction old_sigint, old_sigchld, old_sigterm;

	uloop_install_handler(SIGINT, uloop_handle_sigint, &old_sigint, add);
	uloop_install_handler(SIGTERM, uloop_handle_sigint, &old_sigterm, add);
	uloop_install_handler(SIGCHLD, uloop_sigchld, &old_sigchld, add);
}
static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}
static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}
static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}
static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}
void uloop_run(void)
{
	static int recursive_calls = 0;
	struct timeval tv;

	/*
	 * Handlers are only updated for the first call to uloop_run() (and restored
	 * when this call is done).
	 */
	if (!recursive_calls++)
		uloop_setup_signals(true);

	uloop_cancelled = false;
	while (!uloop_cancelled)
	{
		uloop_gettime(&tv);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();
		uloop_gettime(&tv);
		uloop_run_events(uloop_get_next_timeout(&tv));
	}

	if (!--recursive_calls)
		uloop_setup_signals(false);
}
void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;

	uloop_clear_timeouts();
	uloop_clear_processes();
}
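/*
 * Minimal end-to-end sketch (hypothetical example, assuming the public API
 * from uloop.h, including uloop_end(), which sets uloop_cancelled):
 *
 *	#include "uloop.h"
 *
 *	static void bye_cb(struct uloop_timeout *t)
 *	{
 *		uloop_end();			// terminate uloop_run()
 *	}
 *
 *	int main(void)
 *	{
 *		static struct uloop_timeout bye = { .cb = bye_cb };
 *
 *		uloop_init();			// set up poll_fd (epoll/kqueue)
 *		uloop_timeout_set(&bye, 5000);
 *		uloop_run();			// dispatch fds, timeouts, processes
 *		uloop_done();			// close poll_fd, clear pending lists
 *		return 0;
 *	}
 */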