[tomato.git] / release / src / router / uqmi / libubox / uloop.c
blob 9a77ce49a5a7c2444a768eac3eb5059f52681fb3
/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>

struct uloop_fd_event {
	struct uloop_fd *fd;
	unsigned int events;
};

struct uloop_fd_stack {
	struct uloop_fd_stack *next;
	struct uloop_fd *fd;
	unsigned int events;
};

static struct uloop_fd_stack *fd_stack = NULL;

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
static bool do_sigchld = false;

static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;

#ifdef USE_KQUEUE

int uloop_init(void)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev = {};

	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	EV_SET(&ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
	kevent(poll_fd, &ev, 1, NULL, 0, &timeout);

	return 0;
}

static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}

static struct kevent events[ULOOP_MAX_EVENTS];

static int register_kevent(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	int nev = 0;
	unsigned int fl = 0;
	unsigned int changed;
	uint16_t kflags;

	if (flags & ULOOP_EDGE_DEFER)
		flags &= ~ULOOP_EDGE_TRIGGER;

	changed = flags ^ fd->flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	if (!flags)
		fl |= EV_DELETE;

	fd->flags = flags;
	if (kevent(poll_fd, ev, nev, NULL, fl, &timeout) == -1)
		return -1;

	return 0;
}

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	if (flags & ULOOP_EDGE_TRIGGER)
		flags |= ULOOP_EDGE_DEFER;
	else
		flags &= ~ULOOP_EDGE_DEFER;

	return register_kevent(fd, flags);
}

static int __uloop_fd_delete(struct uloop_fd *fd)
{
	return register_poll(fd, 0);
}

static int uloop_fetch_events(int timeout)
{
	struct timespec ts;
	int nfds, n;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
	for (n = 0; n < nfds; n++) {
		struct uloop_fd_event *cur = &cur_fds[n];
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		cur->fd = u;
		if (!u)
			continue;

		if (events[n].flags & EV_ERROR) {
			u->error = true;
			if (!(u->flags & ULOOP_ERROR_CB))
				uloop_fd_delete(u);
		}

		if(events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if (events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			cur->fd = NULL;

		cur->events = ev;
		if (u->flags & ULOOP_EDGE_DEFER) {
			u->flags &= ~ULOOP_EDGE_DEFER;
			u->flags |= ULOOP_EDGE_TRIGGER;
			register_kevent(u, u->flags);
		}
	}
	return nfds;
}

#endif

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	ev.data.fd = fd->fd;
	ev.data.ptr = fd;
	fd->flags = flags;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

static struct epoll_event events[ULOOP_MAX_EVENTS];

static int __uloop_fd_delete(struct uloop_fd *sock)
{
	sock->flags = 0;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}

static int uloop_fetch_events(int timeout)
{
	int n, nfds;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for (n = 0; n < nfds; ++n) {
		struct uloop_fd_event *cur = &cur_fds[n];
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		cur->fd = u;
		if (!u)
			continue;

		if (events[n].events & (EPOLLERR|EPOLLHUP)) {
			u->error = true;
			if (!(u->flags & ULOOP_ERROR_CB))
				uloop_fd_delete(u);
		}

		if(!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP))) {
			cur->fd = NULL;
			continue;
		}

		if(events[n].events & EPOLLRDHUP)
			u->eof = true;

		if(events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if(events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		cur->events = ev;
	}

	return nfds;
}

#endif

static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
{
	struct uloop_fd_stack *cur;

	/*
	 * Do not buffer events for level-triggered fds, they will keep firing.
	 * Caller needs to take care of recursion issues.
	 */
	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
		return false;

	for (cur = fd_stack; cur; cur = cur->next) {
		if (cur->fd != fd)
			continue;

		if (events < 0)
			cur->fd = NULL;
		else
			cur->events |= events | ULOOP_EVENT_BUFFERED;

		return true;
	}

	return false;
}

static void uloop_run_events(int timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
		if (cur_nfds < 0)
			cur_nfds = 0;
	}

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		if (!fd)
			continue;

		if (!fd->cb)
			continue;

		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;

		return;
	}
}

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
		return uloop_fd_delete(sock);

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;
	sock->error = false;

out:
	return ret;
}

int uloop_fd_delete(struct uloop_fd *fd)
{
	int i;

	for (i = 0; i < cur_nfds; i++) {
		if (cur_fds[cur_fd + i].fd != fd)
			continue;

		cur_fds[cur_fd + i].fd = NULL;
	}

	if (!fd->registered)
		return 0;

	fd->registered = false;
	uloop_fd_stack_event(fd, -1);
	return __uloop_fd_delete(fd);
}
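
/*
 * Timer bookkeeping: timeouts are kept on the "timeouts" list sorted by
 * expiry time; tv_diff() below returns t1 - t2 in milliseconds and is used
 * both for ordering the list and for computing the poll timeout.
 */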

static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}

int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(&timeout->time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec > 1000000) {
		time->tv_sec++;
		time->tv_usec %= 1000000;
	}

	return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}
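
/*
 * Child process bookkeeping: uloop_process entries are kept sorted by pid,
 * and uloop_handle_processes() looks up the matching entry for each
 * waitpid() result once SIGCHLD has been flagged.
 */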

int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}

static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}

static void uloop_install_handler(int signum, void (*handler)(int), struct sigaction* old, bool add)
{
	struct sigaction s;
	struct sigaction *act;

	act = NULL;
	sigaction(signum, NULL, &s);

	if (add) {
		if (s.sa_handler == SIG_DFL) { /* Do not override existing custom signal handlers */
			memcpy(old, &s, sizeof(struct sigaction));
			s.sa_handler = handler;
			s.sa_flags = 0;
			act = &s;
		}
	}
	else if (s.sa_handler == handler) { /* Do not restore if someone modified our handler */
		act = old;
	}

	if (act != NULL)
		sigaction(signum, act, NULL);
}

static void uloop_setup_signals(bool add)
{
	static struct sigaction old_sigint, old_sigchld, old_sigterm;

	uloop_install_handler(SIGINT, uloop_handle_sigint, &old_sigint, add);
	uloop_install_handler(SIGTERM, uloop_handle_sigint, &old_sigterm, add);
	uloop_install_handler(SIGCHLD, uloop_sigchld, &old_sigchld, add);
}

static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}

static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}

void uloop_run(void)
{
	static int recursive_calls = 0;
	struct timeval tv;

	/*
	 * Handlers are only updated for the first call to uloop_run() (and restored
	 * when this call is done).
	 */
	if (!recursive_calls++)
		uloop_setup_signals(true);

	uloop_cancelled = false;
	while (!uloop_cancelled)
	{
		uloop_gettime(&tv);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();
		uloop_gettime(&tv);
		uloop_run_events(uloop_get_next_timeout(&tv));
	}

	if (!--recursive_calls)
		uloop_setup_signals(false);
}

void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;

	uloop_clear_timeouts();
	uloop_clear_processes();
}
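
For reference, a typical caller drives this API as follows: uloop_init() creates the epoll/kqueue descriptor, uloop_fd_add() and uloop_timeout_set() register event sources, uloop_run() dispatches callbacks until uloop_cancelled is set (for example by the SIGINT/SIGTERM handlers installed above), and uloop_done() closes the poll descriptor and clears pending timers and processes. The sketch below is illustrative only: the stdin_cb/tick_cb callbacks are hypothetical, and it assumes the struct uloop_fd and struct uloop_timeout declarations in uloop.h expose the .fd, .cb and .eof members used throughout this file.

/* Minimal usage sketch (assumed uloop.h API surface; not part of uloop.c). */
#include <unistd.h>
#include "uloop.h"

static void stdin_cb(struct uloop_fd *u, unsigned int events)
{
	char buf[256];
	ssize_t len = read(u->fd, buf, sizeof(buf)); /* fd was made non-blocking by uloop_fd_add() */

	if (len <= 0 || u->eof)
		uloop_fd_delete(u); /* stop watching on EOF or error */
}

static void tick_cb(struct uloop_timeout *t)
{
	uloop_timeout_set(t, 1000); /* re-arm: fire again in 1000 ms */
}

int main(void)
{
	struct uloop_fd ufd = { .cb = stdin_cb, .fd = STDIN_FILENO };
	struct uloop_timeout tick = { .cb = tick_cb };

	uloop_init();                   /* create the epoll/kqueue descriptor */
	uloop_fd_add(&ufd, ULOOP_READ); /* level-triggered read events */
	uloop_timeout_set(&tick, 1000); /* first expiry in 1000 ms */
	uloop_run();                    /* dispatch until SIGINT/SIGTERM sets uloop_cancelled */
	uloop_done();                   /* close poll_fd, clear timers and processes */

	return 0;
}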