dnscrypt-proxy: Update to release 1.3.0
[tomato.git] / release / src / router / dnscrypt / src / libevent-modified / event.c
1 /*
2 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "event2/event-config.h"
29 #ifdef WIN32
30 #include <winsock2.h>
31 #define WIN32_LEAN_AND_MEAN
32 #include <windows.h>
33 #undef WIN32_LEAN_AND_MEAN
34 #endif
35 #include <sys/types.h>
36 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
37 #include <sys/time.h>
38 #endif
39 #include <sys/queue.h>
40 #ifdef _EVENT_HAVE_SYS_SOCKET_H
41 #include <sys/socket.h>
42 #endif
43 #include <stdio.h>
44 #include <stdlib.h>
45 #ifdef _EVENT_HAVE_UNISTD_H
46 #include <unistd.h>
47 #endif
48 #ifdef _EVENT_HAVE_SYS_EVENTFD_H
49 #include <sys/eventfd.h>
50 #endif
51 #include <ctype.h>
52 #include <errno.h>
53 #include <signal.h>
54 #include <string.h>
55 #include <time.h>
57 #include "event2/event.h"
58 #include "event2/event_struct.h"
59 #include "event2/event_compat.h"
60 #include "event-internal.h"
61 #include "defer-internal.h"
62 #include "evthread-internal.h"
63 #include "event2/thread.h"
64 #include "event2/util.h"
65 #include "log-internal.h"
66 #include "evmap-internal.h"
67 #include "iocp-internal.h"
68 #include "changelist-internal.h"
69 #include "ht-internal.h"
70 #include "util-internal.h"
72 #ifdef _EVENT_HAVE_EVENT_PORTS
73 extern const struct eventop evportops;
74 #endif
75 #ifdef _EVENT_HAVE_SELECT
76 extern const struct eventop selectops;
77 #endif
78 #ifdef _EVENT_HAVE_POLL
79 extern const struct eventop pollops;
80 #endif
81 #ifdef _EVENT_HAVE_EPOLL
82 extern const struct eventop epollops;
83 #endif
84 #ifdef _EVENT_HAVE_WORKING_KQUEUE
85 extern const struct eventop kqops;
86 #endif
87 #ifdef _EVENT_HAVE_DEVPOLL
88 extern const struct eventop devpollops;
89 #endif
90 #ifdef WIN32
91 extern const struct eventop win32ops;
92 #endif
94 /* Array of backends in order of preference. */
95 static const struct eventop *eventops[] = {
96 #ifdef _EVENT_HAVE_EVENT_PORTS
97 &evportops,
98 #endif
99 #ifdef _EVENT_HAVE_WORKING_KQUEUE
100 &kqops,
101 #endif
102 #ifdef _EVENT_HAVE_EPOLL
103 &epollops,
104 #endif
105 #ifdef _EVENT_HAVE_DEVPOLL
106 &devpollops,
107 #endif
108 #ifdef _EVENT_HAVE_POLL
109 &pollops,
110 #endif
111 #ifdef _EVENT_HAVE_SELECT
112 &selectops,
113 #endif
114 #ifdef WIN32
115 &win32ops,
116 #endif
117 NULL
120 /* Global state; deprecated */
121 struct event_base *event_global_current_base_ = NULL;
122 #define current_base event_global_current_base_
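/*
 * Usage sketch (illustrative only): enumerating the compiled-in backends
 * from the preference array above and checking which one a new base picked.
 * Uses only the public event2/event.h API.
 */
#if 0
static void example_show_backends(void)
{
	int i;
	const char **methods = event_get_supported_methods();
	struct event_base *base = event_base_new();

	for (i = 0; methods && methods[i] != NULL; ++i)
		printf("compiled in: %s\n", methods[i]);
	if (base) {
		printf("selected:    %s\n", event_base_get_method(base));
		event_base_free(base);
	}
}
#endif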
124 /* Global state */
126 static int use_monotonic;
128 /* Prototypes */
129 static inline int event_add_internal(struct event *ev,
130 const struct timeval *tv, int tv_is_absolute);
131 static inline int event_del_internal(struct event *ev);
133 static void event_queue_insert(struct event_base *, struct event *, int);
134 static void event_queue_remove(struct event_base *, struct event *, int);
135 static int event_haveevents(struct event_base *);
137 static int event_process_active(struct event_base *);
139 static int timeout_next(struct event_base *, struct timeval **);
140 static void timeout_process(struct event_base *);
141 static void timeout_correct(struct event_base *, struct timeval *);
143 static inline void event_signal_closure(struct event_base *, struct event *ev);
144 static inline void event_persist_closure(struct event_base *, struct event *ev);
146 static int evthread_notify_base(struct event_base *base);
148 #ifndef _EVENT_DISABLE_DEBUG_MODE
149 /* These functions implement a hashtable of which 'struct event *' structures
150  * have been set up or added. We don't want to trust the content of the struct
151 * event itself, since we're trying to work through cases where an event gets
152 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
155 struct event_debug_entry {
156 HT_ENTRY(event_debug_entry) node;
157 const struct event *ptr;
158 unsigned added : 1;
161 static inline unsigned
162 hash_debug_entry(const struct event_debug_entry *e)
164 /* We need to do this silliness to convince compilers that we
165 * honestly mean to cast e->ptr to an integer, and discard any
166 * part of it that doesn't fit in an unsigned.
168 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
169 /* Our hashtable implementation is pretty sensitive to low bits,
170 * and every struct event is over 64 bytes in size, so we can
171 * just say >>6. */
172 return (u >> 6);
175 static inline int
176 eq_debug_entry(const struct event_debug_entry *a,
177 const struct event_debug_entry *b)
179 return a->ptr == b->ptr;
182 int _event_debug_mode_on = 0;
183 /* Set if it's too late to enable event_debug_mode. */
184 static int event_debug_mode_too_late = 0;
185 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
186 static void *_event_debug_map_lock = NULL;
187 #endif
188 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
189 HT_INITIALIZER();
191 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
192 eq_debug_entry)
193 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
194 eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
196 /* Macro: record that ev is now setup (that is, ready for an add) */
197 #define _event_debug_note_setup(ev) do { \
198 if (_event_debug_mode_on) { \
199 struct event_debug_entry *dent,find; \
200 find.ptr = (ev); \
201 EVLOCK_LOCK(_event_debug_map_lock, 0); \
202 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
203 if (dent) { \
204 dent->added = 0; \
205 } else { \
206 dent = mm_malloc(sizeof(*dent)); \
207 if (!dent) \
208 event_err(1, \
209 "Out of memory in debugging code"); \
210 dent->ptr = (ev); \
211 dent->added = 0; \
212 HT_INSERT(event_debug_map, &global_debug_map, dent); \
214 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
216 event_debug_mode_too_late = 1; \
217 } while (0)
218 /* Macro: record that ev is no longer setup */
219 #define _event_debug_note_teardown(ev) do { \
220 if (_event_debug_mode_on) { \
221 struct event_debug_entry *dent,find; \
222 find.ptr = (ev); \
223 EVLOCK_LOCK(_event_debug_map_lock, 0); \
224 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
225 if (dent) \
226 mm_free(dent); \
227 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
229 event_debug_mode_too_late = 1; \
230 } while (0)
231 /* Macro: record that ev is now added */
232 #define _event_debug_note_add(ev) do { \
233 if (_event_debug_mode_on) { \
234 struct event_debug_entry *dent,find; \
235 find.ptr = (ev); \
236 EVLOCK_LOCK(_event_debug_map_lock, 0); \
237 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
238 if (dent) { \
239 dent->added = 1; \
240 } else { \
241 event_errx(_EVENT_ERR_ABORT, \
242 "%s: noting an add on a non-setup event %p" \
243 " (events: 0x%x, fd: "EV_SOCK_FMT \
244 ", flags: 0x%x)", \
245 __func__, (ev), (ev)->ev_events, \
246 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
248 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
250 event_debug_mode_too_late = 1; \
251 } while (0)
252 /* Macro: record that ev is no longer added */
253 #define _event_debug_note_del(ev) do { \
254 if (_event_debug_mode_on) { \
255 struct event_debug_entry *dent,find; \
256 find.ptr = (ev); \
257 EVLOCK_LOCK(_event_debug_map_lock, 0); \
258 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
259 if (dent) { \
260 dent->added = 0; \
261 } else { \
262 event_errx(_EVENT_ERR_ABORT, \
263 "%s: noting a del on a non-setup event %p" \
264 " (events: 0x%x, fd: "EV_SOCK_FMT \
265 ", flags: 0x%x)", \
266 __func__, (ev), (ev)->ev_events, \
267 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
269 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
271 event_debug_mode_too_late = 1; \
272 } while (0)
273 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
274 #define _event_debug_assert_is_setup(ev) do { \
275 if (_event_debug_mode_on) { \
276 struct event_debug_entry *dent,find; \
277 find.ptr = (ev); \
278 EVLOCK_LOCK(_event_debug_map_lock, 0); \
279 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
280 if (!dent) { \
281 event_errx(_EVENT_ERR_ABORT, \
282 "%s called on a non-initialized event %p" \
283 " (events: 0x%x, fd: "EV_SOCK_FMT\
284 ", flags: 0x%x)", \
285 __func__, (ev), (ev)->ev_events, \
286 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
288 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
290 } while (0)
291 /* Macro: assert that ev is not added (i.e., okay to tear down or set
292 * up again) */
293 #define _event_debug_assert_not_added(ev) do { \
294 if (_event_debug_mode_on) { \
295 struct event_debug_entry *dent,find; \
296 find.ptr = (ev); \
297 EVLOCK_LOCK(_event_debug_map_lock, 0); \
298 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
299 if (dent && dent->added) { \
300 event_errx(_EVENT_ERR_ABORT, \
301 "%s called on an already added event %p" \
302 " (events: 0x%x, fd: "EV_SOCK_FMT", " \
303 "flags: 0x%x)", \
304 __func__, (ev), (ev)->ev_events, \
305 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
307 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
309 } while (0)
310 #else
311 #define _event_debug_note_setup(ev) \
312 ((void)0)
313 #define _event_debug_note_teardown(ev) \
314 ((void)0)
315 #define _event_debug_note_add(ev) \
316 ((void)0)
317 #define _event_debug_note_del(ev) \
318 ((void)0)
319 #define _event_debug_assert_is_setup(ev) \
320 ((void)0)
321 #define _event_debug_assert_not_added(ev) \
322 ((void)0)
323 #endif
325 #define EVENT_BASE_ASSERT_LOCKED(base) \
326 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
328 /* The first time this function is called, it sets use_monotonic to 1
329 * if we have a clock function that supports monotonic time */
330 static void
331 detect_monotonic(void)
333 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
334 struct timespec ts;
335 static int use_monotonic_initialized = 0;
337 if (use_monotonic_initialized)
338 return;
340 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
341 use_monotonic = 1;
343 use_monotonic_initialized = 1;
344 #endif
347 /* How often (in seconds) do we check for changes in wall clock time relative
348 * to monotonic time? Set this to -1 for 'never.' */
349 #define CLOCK_SYNC_INTERVAL -1
351 /** Set 'tp' to the current time according to 'base'. We must hold the lock
352 * on 'base'. If there is a cached time, return it. Otherwise, use
353 * clock_gettime or gettimeofday as appropriate to find out the right time.
354 * Return 0 on success, -1 on failure.
356 static int
357 gettime(struct event_base *base, struct timeval *tp)
359 EVENT_BASE_ASSERT_LOCKED(base);
361 if (base->tv_cache.tv_sec) {
362 *tp = base->tv_cache;
363 return (0);
366 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
367 if (use_monotonic) {
368 struct timespec ts;
370 if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
371 return (-1);
373 tp->tv_sec = ts.tv_sec;
374 tp->tv_usec = ts.tv_nsec / 1000;
375 if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
376 < ts.tv_sec) {
377 struct timeval tv;
378 evutil_gettimeofday(&tv,NULL);
379 evutil_timersub(&tv, tp, &base->tv_clock_diff);
380 base->last_updated_clock_diff = ts.tv_sec;
383 return (0);
385 #endif
387 return (evutil_gettimeofday(tp, NULL));
391 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
393 int r;
394 if (!base) {
395 base = current_base;
396 if (!current_base)
397 return evutil_gettimeofday(tv, NULL);
400 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
401 if (base->tv_cache.tv_sec == 0) {
402 r = evutil_gettimeofday(tv, NULL);
403 } else {
404 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
405 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
406 #else
407 *tv = base->tv_cache;
408 #endif
409 r = 0;
411 EVBASE_RELEASE_LOCK(base, th_base_lock);
412 return r;
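/*
 * Usage sketch (illustrative only): while callbacks are running, asking the
 * base for its cached time is cheaper than another gettimeofday() call; the
 * cache is cleared outside the loop, in which case the real time is returned.
 */
#if 0
static void example_timestamp_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;

	if (event_base_gettimeofday_cached(base, &now) == 0)
		printf("callback at %ld.%06ld\n",
		    (long)now.tv_sec, (long)now.tv_usec);
}
#endif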
415 /** Make 'base' have no current cached time. */
416 static inline void
417 clear_time_cache(struct event_base *base)
419 base->tv_cache.tv_sec = 0;
422 /** Replace the cached time in 'base' with the current time. */
423 static inline void
424 update_time_cache(struct event_base *base)
426 base->tv_cache.tv_sec = 0;
427 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
428 gettime(base, &base->tv_cache);
431 struct event_base *
432 event_init(void)
434 struct event_base *base = event_base_new_with_config(NULL);
436 if (base == NULL) {
437 event_errx(1, "%s: Unable to construct event_base", __func__);
438 return NULL;
441 current_base = base;
443 return (base);
446 struct event_base *
447 event_base_new(void)
449 struct event_base *base = NULL;
450 struct event_config *cfg = event_config_new();
451 if (cfg) {
452 base = event_base_new_with_config(cfg);
453 event_config_free(cfg);
455 return base;
458 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
459 * avoid. */
460 static int
461 event_config_is_avoided_method(const struct event_config *cfg,
462 const char *method)
464 struct event_config_entry *entry;
466 TAILQ_FOREACH(entry, &cfg->entries, next) {
467 if (entry->avoid_method != NULL &&
468 strcmp(entry->avoid_method, method) == 0)
469 return (1);
472 return (0);
475 /** Return true iff 'method' is disabled according to the environment. */
476 static int
477 event_is_method_disabled(const char *name)
479 char environment[64];
480 int i;
482 evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
483 for (i = 8; environment[i] != '\0'; ++i)
484 environment[i] = EVUTIL_TOUPPER(environment[i]);
485 /* Note that evutil_getenv() ignores the environment entirely if
486 * we're setuid */
487 return (evutil_getenv(environment) != NULL);
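/*
 * Usage sketch (illustrative only, POSIX setenv assumed): a backend can be
 * vetoed from the environment with "EVENT_NO" + the uppercased method name,
 * e.g. EVENT_NOEPOLL or EVENT_NOSELECT.  The variable is ignored when the
 * base is created with EVENT_BASE_FLAG_IGNORE_ENV or when running setuid.
 */
#if 0
static struct event_base *example_base_without_epoll(void)
{
	setenv("EVENT_NOEPOLL", "1", 1);	/* before the base is created */
	return event_base_new();
}
#endif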
491 event_base_get_features(const struct event_base *base)
493 return base->evsel->features;
496 void
497 event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
499 memset(cb, 0, sizeof(struct deferred_cb_queue));
500 TAILQ_INIT(&cb->deferred_cb_list);
503 /** Helper for the deferred_cb queue: wake up the event base. */
504 static void
505 notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
507 struct event_base *base = baseptr;
508 if (EVBASE_NEED_NOTIFY(base))
509 evthread_notify_base(base);
512 struct deferred_cb_queue *
513 event_base_get_deferred_cb_queue(struct event_base *base)
515 return base ? &base->defer_queue : NULL;
518 void
519 event_enable_debug_mode(void)
521 #ifndef _EVENT_DISABLE_DEBUG_MODE
522 if (_event_debug_mode_on)
523 event_errx(1, "%s was called twice!", __func__);
524 if (event_debug_mode_too_late)
525 event_errx(1, "%s must be called *before* creating any events "
526 "or event_bases",__func__);
528 _event_debug_mode_on = 1;
530 HT_INIT(event_debug_map, &global_debug_map);
531 #endif
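/*
 * Usage sketch (illustrative only): debug mode has to be switched on before
 * any event or event_base exists, so it is normally the first libevent call
 * an application makes.
 */
#if 0
static void example_startup(void)
{
	event_enable_debug_mode();	/* before event_base_new()/event_new() */
	/* ... create bases and events as usual ... */
}
#endif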
534 #if 0
535 void
536 event_disable_debug_mode(void)
538 struct event_debug_entry **ent, *victim;
540 EVLOCK_LOCK(_event_debug_map_lock, 0);
541 for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
542 victim = *ent;
543 ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
544 mm_free(victim);
546 HT_CLEAR(event_debug_map, &global_debug_map);
547 EVLOCK_UNLOCK(_event_debug_map_lock , 0);
549 #endif
551 struct event_base *
552 event_base_new_with_config(const struct event_config *cfg)
554 int i;
555 struct event_base *base;
556 int should_check_environment;
558 #ifndef _EVENT_DISABLE_DEBUG_MODE
559 event_debug_mode_too_late = 1;
560 #endif
562 if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
563 event_warn("%s: calloc", __func__);
564 return NULL;
566 detect_monotonic();
567 gettime(base, &base->event_tv);
569 min_heap_ctor(&base->timeheap);
570 TAILQ_INIT(&base->eventqueue);
571 base->sig.ev_signal_pair[0] = -1;
572 base->sig.ev_signal_pair[1] = -1;
573 base->th_notify_fd[0] = -1;
574 base->th_notify_fd[1] = -1;
576 event_deferred_cb_queue_init(&base->defer_queue);
577 base->defer_queue.notify_fn = notify_base_cbq_callback;
578 base->defer_queue.notify_arg = base;
579 if (cfg)
580 base->flags = cfg->flags;
582 evmap_io_initmap(&base->io);
583 evmap_signal_initmap(&base->sigmap);
584 event_changelist_init(&base->changelist);
586 base->evbase = NULL;
588 should_check_environment =
589 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
591 for (i = 0; eventops[i] && !base->evbase; i++) {
592 if (cfg != NULL) {
593 /* determine if this backend should be avoided */
594 if (event_config_is_avoided_method(cfg,
595 eventops[i]->name))
596 continue;
597 if ((eventops[i]->features & cfg->require_features)
598 != cfg->require_features)
599 continue;
602 /* also obey the environment variables */
603 if (should_check_environment &&
604 event_is_method_disabled(eventops[i]->name))
605 continue;
607 base->evsel = eventops[i];
609 base->evbase = base->evsel->init(base);
612 if (base->evbase == NULL) {
613 event_warnx("%s: no event mechanism available",
614 __func__);
615 base->evsel = NULL;
616 event_base_free(base);
617 return NULL;
620 if (evutil_getenv("EVENT_SHOW_METHOD"))
621 event_msgx("libevent using: %s", base->evsel->name);
623 /* allocate a single active event queue */
624 if (event_base_priority_init(base, 1) < 0) {
625 event_base_free(base);
626 return NULL;
629 /* prepare for threading */
631 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
632 if (EVTHREAD_LOCKING_ENABLED() &&
633 (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
634 int r;
635 EVTHREAD_ALLOC_LOCK(base->th_base_lock,
636 EVTHREAD_LOCKTYPE_RECURSIVE);
637 base->defer_queue.lock = base->th_base_lock;
638 EVTHREAD_ALLOC_COND(base->current_event_cond);
639 r = evthread_make_base_notifiable(base);
640 if (r<0) {
641 event_warnx("%s: Unable to make base notifiable.", __func__);
642 event_base_free(base);
643 return NULL;
646 #endif
648 #ifdef WIN32
649 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
650 event_base_start_iocp(base, cfg->n_cpus_hint);
651 #endif
653 return (base);
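/*
 * Usage sketch (illustrative only) for the backend-selection loop above:
 * build an event_config that avoids "select" and requires an O(1) backend,
 * then hand it to event_base_new_with_config().
 */
#if 0
static struct event_base *example_configured_base(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();

	if (!cfg)
		return NULL;
	event_config_avoid_method(cfg, "select");
	event_config_require_features(cfg, EV_FEATURE_O1);
	base = event_base_new_with_config(cfg);	/* NULL if nothing qualifies */
	event_config_free(cfg);
	return base;
}
#endif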
657 event_base_start_iocp(struct event_base *base, int n_cpus)
659 #ifdef WIN32
660 if (base->iocp)
661 return 0;
662 base->iocp = event_iocp_port_launch(n_cpus);
663 if (!base->iocp) {
664 event_warnx("%s: Couldn't launch IOCP", __func__);
665 return -1;
667 return 0;
668 #else
669 return -1;
670 #endif
673 void
674 event_base_stop_iocp(struct event_base *base)
676 #ifdef WIN32
677 int rv;
679 if (!base->iocp)
680 return;
681 rv = event_iocp_shutdown(base->iocp, -1);
682 EVUTIL_ASSERT(rv >= 0);
683 base->iocp = NULL;
684 #endif
687 void
688 event_base_free(struct event_base *base)
690 int i, n_deleted=0;
691 struct event *ev;
692 /* XXXX grab the lock? If there is contention when one thread frees
693 * the base, then the contending thread will be very sad soon. */
695 /* event_base_free(NULL) is how to free the current_base if we
696 * made it with event_init and forgot to hold a reference to it. */
697 if (base == NULL && current_base)
698 base = current_base;
699 /* If we're freeing current_base, there won't be a current_base. */
700 if (base == current_base)
701 current_base = NULL;
702 /* Don't actually free NULL. */
703 if (base == NULL) {
704 event_warnx("%s: no base to free", __func__);
705 return;
707 /* XXX(niels) - check for internal events first */
709 #ifdef WIN32
710 event_base_stop_iocp(base);
711 #endif
713 /* threading fds if we have them */
714 if (base->th_notify_fd[0] != -1) {
715 event_del(&base->th_notify);
716 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
717 if (base->th_notify_fd[1] != -1)
718 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
719 base->th_notify_fd[0] = -1;
720 base->th_notify_fd[1] = -1;
721 event_debug_unassign(&base->th_notify);
724 /* Delete all non-internal events. */
725 for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
726 struct event *next = TAILQ_NEXT(ev, ev_next);
727 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
728 event_del(ev);
729 ++n_deleted;
731 ev = next;
733 while ((ev = min_heap_top(&base->timeheap)) != NULL) {
734 event_del(ev);
735 ++n_deleted;
737 for (i = 0; i < base->n_common_timeouts; ++i) {
738 struct common_timeout_list *ctl =
739 base->common_timeout_queues[i];
740 event_del(&ctl->timeout_event); /* Internal; doesn't count */
741 event_debug_unassign(&ctl->timeout_event);
742 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
743 struct event *next = TAILQ_NEXT(ev,
744 ev_timeout_pos.ev_next_with_common_timeout);
745 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
746 event_del(ev);
747 ++n_deleted;
749 ev = next;
751 mm_free(ctl);
753 if (base->common_timeout_queues)
754 mm_free(base->common_timeout_queues);
756 for (i = 0; i < base->nactivequeues; ++i) {
757 for (ev = TAILQ_FIRST(&base->activequeues[i]); ev; ) {
758 struct event *next = TAILQ_NEXT(ev, ev_active_next);
759 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
760 event_del(ev);
761 ++n_deleted;
763 ev = next;
767 if (n_deleted)
768 event_debug(("%s: %d events were still set in base",
769 __func__, n_deleted));
771 if (base->evsel != NULL && base->evsel->dealloc != NULL)
772 base->evsel->dealloc(base);
774 for (i = 0; i < base->nactivequeues; ++i)
775 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
777 EVUTIL_ASSERT(min_heap_empty(&base->timeheap));
778 min_heap_dtor(&base->timeheap);
780 mm_free(base->activequeues);
782 EVUTIL_ASSERT(TAILQ_EMPTY(&base->eventqueue));
784 evmap_io_clear(&base->io);
785 evmap_signal_clear(&base->sigmap);
786 event_changelist_freemem(&base->changelist);
788 EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
789 EVTHREAD_FREE_COND(base->current_event_cond);
791 mm_free(base);
794 /* reinitialize the event base after a fork */
796 event_reinit(struct event_base *base)
798 const struct eventop *evsel;
799 int res = 0;
800 struct event *ev;
801 int was_notifiable = 0;
803 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
805 evsel = base->evsel;
807 #if 0
808 /* Right now, reinit always takes effect, since even if the
809 backend doesn't require it, the signal socketpair code does.
813 /* check if this event mechanism requires reinit */
814 if (!evsel->need_reinit)
815 goto done;
816 #endif
818 /* prevent internal delete */
819 if (base->sig.ev_signal_added) {
820 /* we cannot call event_del here because the base has
821 * not been reinitialized yet. */
822 event_queue_remove(base, &base->sig.ev_signal,
823 EVLIST_INSERTED);
824 if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
825 event_queue_remove(base, &base->sig.ev_signal,
826 EVLIST_ACTIVE);
827 if (base->sig.ev_signal_pair[0] != -1)
828 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
829 if (base->sig.ev_signal_pair[1] != -1)
830 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
831 base->sig.ev_signal_added = 0;
833 if (base->th_notify_fd[0] != -1) {
834 /* we cannot call event_del here because the base has
835 * not been reinitialized yet. */
836 was_notifiable = 1;
837 event_queue_remove(base, &base->th_notify,
838 EVLIST_INSERTED);
839 if (base->th_notify.ev_flags & EVLIST_ACTIVE)
840 event_queue_remove(base, &base->th_notify,
841 EVLIST_ACTIVE);
842 base->sig.ev_signal_added = 0;
843 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
844 if (base->th_notify_fd[1] != -1)
845 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
846 base->th_notify_fd[0] = -1;
847 base->th_notify_fd[1] = -1;
848 event_debug_unassign(&base->th_notify);
851 if (base->evsel->dealloc != NULL)
852 base->evsel->dealloc(base);
853 base->evbase = evsel->init(base);
854 if (base->evbase == NULL) {
855 event_errx(1, "%s: could not reinitialize event mechanism",
856 __func__);
857 res = -1;
858 goto done;
861 event_changelist_freemem(&base->changelist); /* XXX */
862 evmap_io_clear(&base->io);
863 evmap_signal_clear(&base->sigmap);
865 TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
866 if (ev->ev_events & (EV_READ|EV_WRITE)) {
867 if (ev == &base->sig.ev_signal) {
868 /* If we run into the ev_signal event, it's only
869 * in eventqueue because some signal event was
870 * added, which made evsig_add re-add ev_signal.
871 * So don't double-add it. */
872 continue;
874 if (evmap_io_add(base, ev->ev_fd, ev) == -1)
875 res = -1;
876 } else if (ev->ev_events & EV_SIGNAL) {
877 if (evmap_signal_add(base, (int)ev->ev_fd, ev) == -1)
878 res = -1;
882 if (was_notifiable && res == 0)
883 res = evthread_make_base_notifiable(base);
885 done:
886 EVBASE_RELEASE_LOCK(base, th_base_lock);
887 return (res);
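/*
 * Usage sketch (illustrative only): a child process must call event_reinit()
 * on an inherited base before using it, since backends such as epoll and
 * kqueue (and the signal socketpair) do not survive fork() cleanly.
 */
#if 0
static void example_fork_and_reinit(struct event_base *base)
{
	if (fork() == 0) {
		if (event_reinit(base) < 0)
			return;		/* could not rebuild the backend */
		event_base_dispatch(base);
	}
}
#endif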
890 const char **
891 event_get_supported_methods(void)
893 static const char **methods = NULL;
894 const struct eventop **method;
895 const char **tmp;
896 int i = 0, k;
898 /* count all methods */
899 for (method = &eventops[0]; *method != NULL; ++method) {
900 ++i;
903 /* allocate one more than we need for the NULL pointer */
904 tmp = mm_calloc((i + 1), sizeof(char *));
905 if (tmp == NULL)
906 return (NULL);
908 /* populate the array with the supported methods */
909 for (k = 0, i = 0; eventops[k] != NULL; ++k) {
910 tmp[i++] = eventops[k]->name;
912 tmp[i] = NULL;
914 if (methods != NULL)
915 mm_free((char**)methods);
917 methods = tmp;
919 return (methods);
922 struct event_config *
923 event_config_new(void)
925 struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
927 if (cfg == NULL)
928 return (NULL);
930 TAILQ_INIT(&cfg->entries);
932 return (cfg);
935 static void
936 event_config_entry_free(struct event_config_entry *entry)
938 if (entry->avoid_method != NULL)
939 mm_free((char *)entry->avoid_method);
940 mm_free(entry);
943 void
944 event_config_free(struct event_config *cfg)
946 struct event_config_entry *entry;
948 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
949 TAILQ_REMOVE(&cfg->entries, entry, next);
950 event_config_entry_free(entry);
952 mm_free(cfg);
956 event_config_set_flag(struct event_config *cfg, int flag)
958 if (!cfg)
959 return -1;
960 cfg->flags |= flag;
961 return 0;
965 event_config_avoid_method(struct event_config *cfg, const char *method)
967 struct event_config_entry *entry = mm_malloc(sizeof(*entry));
968 if (entry == NULL)
969 return (-1);
971 if ((entry->avoid_method = mm_strdup(method)) == NULL) {
972 mm_free(entry);
973 return (-1);
976 TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
978 return (0);
982 event_config_require_features(struct event_config *cfg,
983 int features)
985 if (!cfg)
986 return (-1);
987 cfg->require_features = features;
988 return (0);
992 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
994 if (!cfg)
995 return (-1);
996 cfg->n_cpus_hint = cpus;
997 return (0);
1001 event_priority_init(int npriorities)
1003 return event_base_priority_init(current_base, npriorities);
1007 event_base_priority_init(struct event_base *base, int npriorities)
1009 int i;
1011 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1012 || npriorities >= EVENT_MAX_PRIORITIES)
1013 return (-1);
1015 if (npriorities == base->nactivequeues)
1016 return (0);
1018 if (base->nactivequeues) {
1019 mm_free(base->activequeues);
1020 base->nactivequeues = 0;
1023 /* Allocate our priority queues */
1024 base->activequeues = (struct event_list *)
1025 mm_calloc(npriorities, sizeof(struct event_list));
1026 if (base->activequeues == NULL) {
1027 event_warn("%s: calloc", __func__);
1028 return (-1);
1030 base->nactivequeues = npriorities;
1032 for (i = 0; i < base->nactivequeues; ++i) {
1033 TAILQ_INIT(&base->activequeues[i]);
1036 return (0);
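/*
 * Usage sketch (illustrative only): give a base three priority levels and
 * mark one event as most urgent.  event_base_priority_init() must run while
 * no callbacks are active (typically right after creating the base), and
 * event_priority_set() fails on an event that is currently active.
 */
#if 0
static void example_priorities(struct event_base *base, struct event *urgent)
{
	event_base_priority_init(base, 3);	/* queues 0 (runs first) .. 2 */
	event_priority_set(urgent, 0);		/* run before priority 1 and 2 */
}
#endif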
1039 /* Returns true iff we're currently watching any events. */
1040 static int
1041 event_haveevents(struct event_base *base)
1043 /* Caller must hold th_base_lock */
1044 return (base->virtual_event_count > 0 || base->event_count > 0);
1047 /* "closure" function called when processing active signal events */
1048 static inline void
1049 event_signal_closure(struct event_base *base, struct event *ev)
1051 short ncalls;
1052 int should_break;
1054 /* Allows deletes to work */
1055 ncalls = ev->ev_ncalls;
1056 if (ncalls != 0)
1057 ev->ev_pncalls = &ncalls;
1058 EVBASE_RELEASE_LOCK(base, th_base_lock);
1059 while (ncalls) {
1060 ncalls--;
1061 ev->ev_ncalls = ncalls;
1062 if (ncalls == 0)
1063 ev->ev_pncalls = NULL;
1064 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1066 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1067 should_break = base->event_break;
1068 EVBASE_RELEASE_LOCK(base, th_base_lock);
1070 if (should_break) {
1071 if (ncalls != 0)
1072 ev->ev_pncalls = NULL;
1073 return;
1078 /* Common timeouts are special timeouts that are handled as queues rather than
1079 * in the minheap. This is more efficient than the minheap if we happen to
1080 * know that we're going to get several thousands of timeout events all with
1081 * the same timeout value.
1083 * Since all our timeout handling code assumes timevals can be copied,
1084 * assigned, etc, we can't use "magic pointer" to encode these common
1085 * timeouts. Searching through a list to see if every timeout is common could
1086 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1087 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1088  * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
1089  * of index into the event_base's array of common timeouts.
1092 #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
1093 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1094 #define COMMON_TIMEOUT_IDX_SHIFT 20
1095 #define COMMON_TIMEOUT_MASK 0xf0000000
1096 #define COMMON_TIMEOUT_MAGIC 0x50000000
1098 #define COMMON_TIMEOUT_IDX(tv) \
1099 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
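/*
 * Worked example of the encoding above, assuming the usual definition of
 * MICROSECONDS_MASK as the low 20 bits (0x000fffff):
 *
 *	tv_usec = 0x5033d090
 *		= COMMON_TIMEOUT_MAGIC (0x50000000)
 *		| (3 << COMMON_TIMEOUT_IDX_SHIFT)	-> queue index 3
 *		| 250000 (0x0003d090)			-> 250000 microseconds
 *
 * so COMMON_TIMEOUT_IDX(tv) is 3 and (tv_usec & MICROSECONDS_MASK) is the
 * real microsecond part.
 */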
1101 /** Return true iff 'tv' is a common timeout in 'base' */
1102 static inline int
1103 is_common_timeout(const struct timeval *tv,
1104 const struct event_base *base)
1106 int idx;
1107 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1108 return 0;
1109 idx = COMMON_TIMEOUT_IDX(tv);
1110 return idx < base->n_common_timeouts;
1113 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1114 * one is a common timeout. */
1115 static inline int
1116 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1118 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1119 (tv2->tv_usec & ~MICROSECONDS_MASK);
1122 /** Requires that 'tv' is a common timeout. Return the corresponding
1123 * common_timeout_list. */
1124 static inline struct common_timeout_list *
1125 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1127 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1130 #if 0
1131 static inline int
1132 common_timeout_ok(const struct timeval *tv,
1133 struct event_base *base)
1135 const struct timeval *expect =
1136 &get_common_timeout_list(base, tv)->duration;
1137 return tv->tv_sec == expect->tv_sec &&
1138 tv->tv_usec == expect->tv_usec;
1140 #endif
1142 /* Add the timeout for the first event in given common timeout list to the
1143 * event_base's minheap. */
1144 static void
1145 common_timeout_schedule(struct common_timeout_list *ctl,
1146 const struct timeval *now, struct event *head)
1148 struct timeval timeout = head->ev_timeout;
1149 timeout.tv_usec &= MICROSECONDS_MASK;
1150 event_add_internal(&ctl->timeout_event, &timeout, 1);
1153 /* Callback: invoked when the timeout for a common timeout queue triggers.
1154 * This means that (at least) the first event in that queue should be run,
1155 * and the timeout should be rescheduled if there are more events. */
1156 static void
1157 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1159 struct timeval now;
1160 struct common_timeout_list *ctl = arg;
1161 struct event_base *base = ctl->base;
1162 struct event *ev = NULL;
1163 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1164 gettime(base, &now);
1165 while (1) {
1166 ev = TAILQ_FIRST(&ctl->events);
1167 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1168 (ev->ev_timeout.tv_sec == now.tv_sec &&
1169 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1170 break;
1171 event_del_internal(ev);
1172 event_active_nolock(ev, EV_TIMEOUT, 1);
1174 if (ev)
1175 common_timeout_schedule(ctl, &now, ev);
1176 EVBASE_RELEASE_LOCK(base, th_base_lock);
1179 #define MAX_COMMON_TIMEOUTS 256
1181 const struct timeval *
1182 event_base_init_common_timeout(struct event_base *base,
1183 const struct timeval *duration)
1185 int i;
1186 struct timeval tv;
1187 const struct timeval *result=NULL;
1188 struct common_timeout_list *new_ctl;
1190 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1191 if (duration->tv_usec > 1000000) {
1192 memcpy(&tv, duration, sizeof(struct timeval));
1193 if (is_common_timeout(duration, base))
1194 tv.tv_usec &= MICROSECONDS_MASK;
1195 tv.tv_sec += tv.tv_usec / 1000000;
1196 tv.tv_usec %= 1000000;
1197 duration = &tv;
1199 for (i = 0; i < base->n_common_timeouts; ++i) {
1200 const struct common_timeout_list *ctl =
1201 base->common_timeout_queues[i];
1202 if (duration->tv_sec == ctl->duration.tv_sec &&
1203 duration->tv_usec ==
1204 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1205 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1206 result = &ctl->duration;
1207 goto done;
1210 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1211 event_warnx("%s: Too many common timeouts already in use; "
1212 "we only support %d per event_base", __func__,
1213 MAX_COMMON_TIMEOUTS);
1214 goto done;
1216 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1217 int n = base->n_common_timeouts < 16 ? 16 :
1218 base->n_common_timeouts*2;
1219 struct common_timeout_list **newqueues =
1220 mm_realloc(base->common_timeout_queues,
1221 n*sizeof(struct common_timeout_queue *));
1222 if (!newqueues) {
1223 event_warn("%s: realloc",__func__);
1224 goto done;
1226 base->n_common_timeouts_allocated = n;
1227 base->common_timeout_queues = newqueues;
1229 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1230 if (!new_ctl) {
1231 event_warn("%s: calloc",__func__);
1232 goto done;
1234 TAILQ_INIT(&new_ctl->events);
1235 new_ctl->duration.tv_sec = duration->tv_sec;
1236 new_ctl->duration.tv_usec =
1237 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1238 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1239 evtimer_assign(&new_ctl->timeout_event, base,
1240 common_timeout_callback, new_ctl);
1241 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1242 event_priority_set(&new_ctl->timeout_event, 0);
1243 new_ctl->base = base;
1244 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1245 result = &new_ctl->duration;
1247 done:
1248 if (result)
1249 EVUTIL_ASSERT(is_common_timeout(result, base));
1251 EVBASE_RELEASE_LOCK(base, th_base_lock);
1252 return result;
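/*
 * Usage sketch (illustrative only): ask the base for a common-timeout timeval
 * once, then pass that same pointer to event_add() for the many events that
 * share the duration, so they queue here instead of in the minheap.
 */
#if 0
static const struct timeval *example_common_ten_seconds(struct event_base *base)
{
	struct timeval ten_sec = { 10, 0 };
	const struct timeval *tv_common =
	    event_base_init_common_timeout(base, &ten_sec);
	/* later, for each of the many events: event_add(ev, tv_common); */
	return tv_common;
}
#endif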
1255 /* Closure function invoked when we're activating a persistent event. */
1256 static inline void
1257 event_persist_closure(struct event_base *base, struct event *ev)
1259 /* reschedule the persistent event if we have a timeout. */
1260 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1261 /* If there was a timeout, we want it to run at an interval of
1262 * ev_io_timeout after the last time it was _scheduled_ for,
1263 * not ev_io_timeout after _now_. If it fired for another
1264 * reason, though, the timeout ought to start ticking _now_. */
1265 struct timeval run_at, relative_to, delay, now;
1266 ev_uint32_t usec_mask = 0;
1267 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1268 &ev->ev_io_timeout));
1269 gettime(base, &now);
1270 if (is_common_timeout(&ev->ev_timeout, base)) {
1271 delay = ev->ev_io_timeout;
1272 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1273 delay.tv_usec &= MICROSECONDS_MASK;
1274 if (ev->ev_res & EV_TIMEOUT) {
1275 relative_to = ev->ev_timeout;
1276 relative_to.tv_usec &= MICROSECONDS_MASK;
1277 } else {
1278 relative_to = now;
1280 } else {
1281 delay = ev->ev_io_timeout;
1282 if (ev->ev_res & EV_TIMEOUT) {
1283 relative_to = ev->ev_timeout;
1284 } else {
1285 relative_to = now;
1288 evutil_timeradd(&relative_to, &delay, &run_at);
1289 if (evutil_timercmp(&run_at, &now, <)) {
1290 /* Looks like we missed at least one invocation due to
1291 * a clock jump, not running the event loop for a
1292 * while, really slow callbacks, or
1293 * something. Reschedule relative to now.
1295 evutil_timeradd(&now, &delay, &run_at);
1297 run_at.tv_usec |= usec_mask;
1298 event_add_internal(ev, &run_at, 1);
1300 EVBASE_RELEASE_LOCK(base, th_base_lock);
1301 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
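/*
 * Worked example of the rescheduling rule above: an EV_READ|EV_PERSIST event
 * with a 5 second timeout is scheduled to fire at t=5.  If it runs because
 * the timeout expired (even a little late, say at t=5.3), the next timeout is
 * set for t=10, relative to the old scheduled time.  If instead it runs at
 * t=7 because the fd became readable, the next timeout is set for t=12,
 * relative to now.
 */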
1305 Helper for event_process_active to process all the events in a single queue,
1306 releasing the lock as we go. This function requires that the lock be held
1307 when it's invoked. Returns -1 if we get a signal or an event_break that
1308 means we should stop processing any active events now. Otherwise returns
1309 the number of non-internal events that we processed.
1311 static int
1312 event_process_active_single_queue(struct event_base *base,
1313 struct event_list *activeq)
1315 struct event *ev;
1316 int count = 0;
1318 EVUTIL_ASSERT(activeq != NULL);
1320 for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
1321 if (ev->ev_events & EV_PERSIST)
1322 event_queue_remove(base, ev, EVLIST_ACTIVE);
1323 else
1324 event_del_internal(ev);
1325 if (!(ev->ev_flags & EVLIST_INTERNAL))
1326 ++count;
1328 event_debug((
1329 "event_process_active: event: %p, %s%scall %p",
1331 ev->ev_res & EV_READ ? "EV_READ " : " ",
1332 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1333 ev->ev_callback));
1335 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1336 base->current_event = ev;
1337 base->current_event_waiters = 0;
1338 #endif
1340 switch (ev->ev_closure) {
1341 case EV_CLOSURE_SIGNAL:
1342 event_signal_closure(base, ev);
1343 break;
1344 case EV_CLOSURE_PERSIST:
1345 event_persist_closure(base, ev);
1346 break;
1347 default:
1348 case EV_CLOSURE_NONE:
1349 EVBASE_RELEASE_LOCK(base, th_base_lock);
1350 (*ev->ev_callback)(
1351 ev->ev_fd, ev->ev_res, ev->ev_arg);
1352 break;
1355 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1356 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1357 base->current_event = NULL;
1358 if (base->current_event_waiters) {
1359 base->current_event_waiters = 0;
1360 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1362 #endif
1364 if (base->event_break)
1365 return -1;
1366 if (base->event_continue)
1367 break;
1369 return count;
1373   Process up to MAX_DEFERRED of the deferred_cb entries in 'queue'. If
1374 *breakptr becomes set to 1, stop. Requires that we start out holding
1375 the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
1376 we process.
1378 static int
1379 event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
1381 int count = 0;
1382 struct deferred_cb *cb;
1384 #define MAX_DEFERRED 16
1385 while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
1386 cb->queued = 0;
1387 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
1388 --queue->active_count;
1389 UNLOCK_DEFERRED_QUEUE(queue);
1391 cb->cb(cb, cb->arg);
1393 LOCK_DEFERRED_QUEUE(queue);
1394 if (*breakptr)
1395 return -1;
1396 if (++count == MAX_DEFERRED)
1397 break;
1399 #undef MAX_DEFERRED
1400 return count;
1404 * Active events are stored in priority queues. Lower priorities are always
1405  * processed before higher priorities. Low priority events can starve high
1406 * priority ones.
1409 static int
1410 event_process_active(struct event_base *base)
1412 /* Caller must hold th_base_lock */
1413 struct event_list *activeq = NULL;
1414 int i, c = 0;
1416 for (i = 0; i < base->nactivequeues; ++i) {
1417 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1418 base->event_running_priority = i;
1419 activeq = &base->activequeues[i];
1420 c = event_process_active_single_queue(base, activeq);
1421 if (c < 0) {
1422 base->event_running_priority = -1;
1423 return -1;
1424 } else if (c > 0)
1425 break; /* Processed a real event; do not
1426 * consider lower-priority events */
1427 /* If we get here, all of the events we processed
1428 * were internal. Continue. */
1432 event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
1433 base->event_running_priority = -1;
1434 return c;
1438 * Wait continuously for events. We exit only if no events are left.
1442 event_dispatch(void)
1444 return (event_loop(0));
1448 event_base_dispatch(struct event_base *event_base)
1450 return (event_base_loop(event_base, 0));
1453 const char *
1454 event_base_get_method(const struct event_base *base)
1456 EVUTIL_ASSERT(base);
1457 return (base->evsel->name);
1460 /** Callback: used to implement event_base_loopexit by telling the event_base
1461 * that it's time to exit its loop. */
1462 static void
1463 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1465 struct event_base *base = arg;
1466 base->event_gotterm = 1;
1470 event_loopexit(const struct timeval *tv)
1472 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1473 current_base, tv));
1477 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1479 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1480 event_base, tv));
1484 event_loopbreak(void)
1486 return (event_base_loopbreak(current_base));
1490 event_base_loopbreak(struct event_base *event_base)
1492 int r = 0;
1493 if (event_base == NULL)
1494 return (-1);
1496 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1497 event_base->event_break = 1;
1499 if (EVBASE_NEED_NOTIFY(event_base)) {
1500 r = evthread_notify_base(event_base);
1501 } else {
1502 r = (0);
1504 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1505 return r;
1509 event_base_got_break(struct event_base *event_base)
1511 int res;
1512 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1513 res = event_base->event_break;
1514 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1515 return res;
1519 event_base_got_exit(struct event_base *event_base)
1521 int res;
1522 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1523 res = event_base->event_gotterm;
1524 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1525 return res;
1528 /* not thread safe */
1531 event_loop(int flags)
1533 return event_base_loop(current_base, flags);
1537 event_base_loop(struct event_base *base, int flags)
1539 const struct eventop *evsel = base->evsel;
1540 struct timeval tv;
1541 struct timeval *tv_p;
1542 int res, done, retval = 0;
1544 /* Grab the lock. We will release it inside evsel.dispatch, and again
1545 * as we invoke user callbacks. */
1546 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1548 if (base->running_loop) {
1549 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1550 " can run on each event_base at once.", __func__);
1551 EVBASE_RELEASE_LOCK(base, th_base_lock);
1552 return -1;
1555 base->running_loop = 1;
1557 clear_time_cache(base);
1559 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1560 evsig_set_base(base);
1562 done = 0;
1564 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1565 base->th_owner_id = EVTHREAD_GET_ID();
1566 #endif
1568 base->event_gotterm = base->event_break = 0;
1570 while (!done) {
1571 base->event_continue = 0;
1573 /* Terminate the loop if we have been asked to */
1574 if (base->event_gotterm) {
1575 break;
1578 if (base->event_break) {
1579 break;
1582 timeout_correct(base, &tv);
1584 tv_p = &tv;
1585 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1586 timeout_next(base, &tv_p);
1587 } else {
1589 * if we have active events, we just poll new events
1590 * without waiting.
1592 evutil_timerclear(&tv);
1595 /* If we have no events, we just exit */
1596 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1597 event_debug(("%s: no events registered.", __func__));
1598 retval = 1;
1599 goto done;
1602 /* update last old time */
1603 gettime(base, &base->event_tv);
1605 clear_time_cache(base);
1607 res = evsel->dispatch(base, tv_p);
1609 if (res == -1) {
1610 event_debug(("%s: dispatch returned unsuccessfully.",
1611 __func__));
1612 retval = -1;
1613 goto done;
1616 update_time_cache(base);
1618 timeout_process(base);
1620 if (N_ACTIVE_CALLBACKS(base)) {
1621 int n = event_process_active(base);
1622 if ((flags & EVLOOP_ONCE)
1623 && N_ACTIVE_CALLBACKS(base) == 0
1624 && n != 0)
1625 done = 1;
1626 } else if (flags & EVLOOP_NONBLOCK)
1627 done = 1;
1629 event_debug(("%s: asked to terminate loop.", __func__));
1631 done:
1632 clear_time_cache(base);
1633 base->running_loop = 0;
1635 EVBASE_RELEASE_LOCK(base, th_base_lock);
1637 return (retval);
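/*
 * Usage sketch (illustrative only): the common ways to drive the loop above.
 */
#if 0
static void example_run(struct event_base *base)
{
	struct timeval ten = { 10, 0 };

	event_base_loop(base, EVLOOP_NONBLOCK);	/* poll once, never block */
	event_base_loop(base, EVLOOP_ONCE);	/* wait, run one batch of callbacks */

	event_base_loopexit(base, &ten);	/* ask a running loop to stop in 10s */
	event_base_dispatch(base);		/* same as event_base_loop(base, 0) */
}
#endif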
1640 /* Sets up an event for processing once */
1641 struct event_once {
1642 struct event ev;
1644 void (*cb)(evutil_socket_t, short, void *);
1645 void *arg;
1648 /* One-time callback to implement event_base_once: invokes the user callback,
1649 * then deletes the allocated storage */
1650 static void
1651 event_once_cb(evutil_socket_t fd, short events, void *arg)
1653 struct event_once *eonce = arg;
1655 (*eonce->cb)(fd, events, eonce->arg);
1656 event_debug_unassign(&eonce->ev);
1657 mm_free(eonce);
1660 /* not threadsafe, event scheduled once. */
1662 event_once(evutil_socket_t fd, short events,
1663 void (*callback)(evutil_socket_t, short, void *),
1664 void *arg, const struct timeval *tv)
1666 return event_base_once(current_base, fd, events, callback, arg, tv);
1669 /* Schedules an event once */
1671 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1672 void (*callback)(evutil_socket_t, short, void *),
1673 void *arg, const struct timeval *tv)
1675 struct event_once *eonce;
1676 struct timeval etv;
1677 int res = 0;
1679 /* We cannot support signals that just fire once, or persistent
1680 * events. */
1681 if (events & (EV_SIGNAL|EV_PERSIST))
1682 return (-1);
1684 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1685 return (-1);
1687 eonce->cb = callback;
1688 eonce->arg = arg;
1690 if (events == EV_TIMEOUT) {
1691 if (tv == NULL) {
1692 evutil_timerclear(&etv);
1693 tv = &etv;
1696 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1697 } else if (events & (EV_READ|EV_WRITE)) {
1698 events &= EV_READ|EV_WRITE;
1700 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1701 } else {
1702 /* Bad event combination */
1703 mm_free(eonce);
1704 return (-1);
1707 if (res == 0)
1708 res = event_add(&eonce->ev, tv);
1709 if (res != 0) {
1710 mm_free(eonce);
1711 return (res);
1714 return (0);
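/*
 * Usage sketch (illustrative only): a one-shot callback without managing a
 * struct event by hand; the event_once storage allocated above is freed after
 * the callback runs.
 */
#if 0
static void example_once_cb(evutil_socket_t fd, short what, void *arg)
{
	printf("fired once: %s\n", (const char *)arg);
}

static void example_schedule_once(struct event_base *base)
{
	struct timeval two = { 2, 0 };
	event_base_once(base, -1, EV_TIMEOUT, example_once_cb, "hello", &two);
}
#endif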
1718 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
1720 if (!base)
1721 base = current_base;
1723 _event_debug_assert_not_added(ev);
1725 ev->ev_base = base;
1727 ev->ev_callback = callback;
1728 ev->ev_arg = arg;
1729 ev->ev_fd = fd;
1730 ev->ev_events = events;
1731 ev->ev_res = 0;
1732 ev->ev_flags = EVLIST_INIT;
1733 ev->ev_ncalls = 0;
1734 ev->ev_pncalls = NULL;
1736 if (events & EV_SIGNAL) {
1737 if ((events & (EV_READ|EV_WRITE)) != 0) {
1738 event_warnx("%s: EV_SIGNAL is not compatible with "
1739 "EV_READ or EV_WRITE", __func__);
1740 return -1;
1742 ev->ev_closure = EV_CLOSURE_SIGNAL;
1743 } else {
1744 if (events & EV_PERSIST) {
1745 evutil_timerclear(&ev->ev_io_timeout);
1746 ev->ev_closure = EV_CLOSURE_PERSIST;
1747 } else {
1748 ev->ev_closure = EV_CLOSURE_NONE;
1752 min_heap_elem_init(ev);
1754 if (base != NULL) {
1755 /* by default, we put new events into the middle priority */
1756 ev->ev_pri = base->nactivequeues / 2;
1759 _event_debug_note_setup(ev);
1761 return 0;
1765 event_base_set(struct event_base *base, struct event *ev)
1767 /* Only innocent events may be assigned to a different base */
1768 if (ev->ev_flags != EVLIST_INIT)
1769 return (-1);
1771 _event_debug_assert_is_setup(ev);
1773 ev->ev_base = base;
1774 ev->ev_pri = base->nactivequeues/2;
1776 return (0);
1779 void
1780 event_set(struct event *ev, evutil_socket_t fd, short events,
1781 void (*callback)(evutil_socket_t, short, void *), void *arg)
1783 int r;
1784 r = event_assign(ev, current_base, fd, events, callback, arg);
1785 EVUTIL_ASSERT(r == 0);
1788 struct event *
1789 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
1791 struct event *ev;
1792 ev = mm_malloc(sizeof(struct event));
1793 if (ev == NULL)
1794 return (NULL);
1795 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
1796 mm_free(ev);
1797 return (NULL);
1800 return (ev);
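/*
 * Usage sketch (illustrative only): event_new() is the heap-allocating
 * counterpart of event_assign() above; pair every event_new() with an
 * event_free(), which deletes the event before releasing it.
 */
#if 0
static void example_read_cb(evutil_socket_t fd, short what, void *arg)
{
	/* ... consume data from fd ... */
}

static void example_watch_fd(struct event_base *base, evutil_socket_t fd)
{
	struct timeval thirty = { 30, 0 };
	struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
	    example_read_cb, NULL);

	if (!ev)
		return;
	event_add(ev, &thirty);		/* readable, or time out every 30s */
	event_base_dispatch(base);
	event_free(ev);
}
#endif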
1803 void
1804 event_free(struct event *ev)
1806 _event_debug_assert_is_setup(ev);
1808 /* make sure that this event won't be coming back to haunt us. */
1809 event_del(ev);
1810 _event_debug_note_teardown(ev);
1811 mm_free(ev);
1815 void
1816 event_debug_unassign(struct event *ev)
1818 _event_debug_assert_not_added(ev);
1819 _event_debug_note_teardown(ev);
1821 ev->ev_flags &= ~EVLIST_INIT;
1825  * Sets the priority of an event - if an event is already scheduled
1826 * changing the priority is going to fail.
1830 event_priority_set(struct event *ev, int pri)
1832 _event_debug_assert_is_setup(ev);
1834 if (ev->ev_flags & EVLIST_ACTIVE)
1835 return (-1);
1836 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
1837 return (-1);
1839 ev->ev_pri = pri;
1841 return (0);
1845 * Checks if a specific event is pending or scheduled.
1849 event_pending(const struct event *ev, short event, struct timeval *tv)
1851 int flags = 0;
1853 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
1854 event_warnx("%s: event has no event_base set.", __func__);
1855 return 0;
1858 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
1859 _event_debug_assert_is_setup(ev);
1861 if (ev->ev_flags & EVLIST_INSERTED)
1862 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
1863 if (ev->ev_flags & EVLIST_ACTIVE)
1864 flags |= ev->ev_res;
1865 if (ev->ev_flags & EVLIST_TIMEOUT)
1866 flags |= EV_TIMEOUT;
1868 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
1870 /* See if there is a timeout that we should report */
1871 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
1872 struct timeval tmp = ev->ev_timeout;
1873 tmp.tv_usec &= MICROSECONDS_MASK;
1874 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1875 		/* correctly remap to real time */
1876 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
1877 #else
1878 *tv = tmp;
1879 #endif
1882 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
1884 return (flags & event);
1888 event_initialized(const struct event *ev)
1890 if (!(ev->ev_flags & EVLIST_INIT))
1891 return 0;
1893 return 1;
1896 void
1897 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
1899 _event_debug_assert_is_setup(event);
1901 if (base_out)
1902 *base_out = event->ev_base;
1903 if (fd_out)
1904 *fd_out = event->ev_fd;
1905 if (events_out)
1906 *events_out = event->ev_events;
1907 if (callback_out)
1908 *callback_out = event->ev_callback;
1909 if (arg_out)
1910 *arg_out = event->ev_arg;
1913 size_t
1914 event_get_struct_event_size(void)
1916 return sizeof(struct event);
1919 evutil_socket_t
1920 event_get_fd(const struct event *ev)
1922 _event_debug_assert_is_setup(ev);
1923 return ev->ev_fd;
1926 struct event_base *
1927 event_get_base(const struct event *ev)
1929 _event_debug_assert_is_setup(ev);
1930 return ev->ev_base;
1933 short
1934 event_get_events(const struct event *ev)
1936 _event_debug_assert_is_setup(ev);
1937 return ev->ev_events;
1940 event_callback_fn
1941 event_get_callback(const struct event *ev)
1943 _event_debug_assert_is_setup(ev);
1944 return ev->ev_callback;
1947 void *
1948 event_get_callback_arg(const struct event *ev)
1950 _event_debug_assert_is_setup(ev);
1951 return ev->ev_arg;
1955 event_add(struct event *ev, const struct timeval *tv)
1957 int res;
1959 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
1960 event_warnx("%s: event has no event_base set.", __func__);
1961 return -1;
1964 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
1966 res = event_add_internal(ev, tv, 0);
1968 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
1970 return (res);
1973 /* Helper callback: wake an event_base from another thread. This version
1974 * works by writing a byte to one end of a socketpair, so that the event_base
1975 * listening on the other end will wake up as the corresponding event
1976 * triggers */
1977 static int
1978 evthread_notify_base_default(struct event_base *base)
1980 char buf[1];
1981 int r;
1982 buf[0] = (char) 0;
1983 #ifdef WIN32
1984 r = send(base->th_notify_fd[1], buf, 1, 0);
1985 #else
1986 r = write(base->th_notify_fd[1], buf, 1);
1987 #endif
1988 return (r < 0 && errno != EAGAIN) ? -1 : 0;
1991 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
1992 /* Helper callback: wake an event_base from another thread. This version
1993 * assumes that you have a working eventfd() implementation. */
1994 static int
1995 evthread_notify_base_eventfd(struct event_base *base)
1997 ev_uint64_t msg = 1;
1998 int r;
1999 do {
2000 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2001 } while (r < 0 && errno == EAGAIN);
2003 return (r < 0) ? -1 : 0;
2005 #endif
2007 /** Tell the thread currently running the event_loop for base (if any) that it
2008 * needs to stop waiting in its dispatch function (if it is) and process all
2009 * active events and deferred callbacks (if there are any). */
2010 static int
2011 evthread_notify_base(struct event_base *base)
2013 EVENT_BASE_ASSERT_LOCKED(base);
2014 if (!base->th_notify_fn)
2015 return -1;
2016 if (base->is_notify_pending)
2017 return 0;
2018 base->is_notify_pending = 1;
2019 return base->th_notify_fn(base);
2022 /* Implementation function to add an event. Works just like event_add,
2023 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2024 * we treat tv as an absolute time, not as an interval to add to the current
2025 * time */
2026 static inline int
2027 event_add_internal(struct event *ev, const struct timeval *tv,
2028 int tv_is_absolute)
2030 struct event_base *base = ev->ev_base;
2031 int res = 0;
2032 int notify = 0;
2034 EVENT_BASE_ASSERT_LOCKED(base);
2035 _event_debug_assert_is_setup(ev);
2037 event_debug((
2038 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%scall %p",
2040 EV_SOCK_ARG(ev->ev_fd),
2041 ev->ev_events & EV_READ ? "EV_READ " : " ",
2042 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2043 tv ? "EV_TIMEOUT " : " ",
2044 ev->ev_callback));
2046 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2049 * prepare for timeout insertion further below, if we get a
2050 * failure on any step, we should not change any state.
2052 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2053 if (min_heap_reserve(&base->timeheap,
2054 1 + min_heap_size(&base->timeheap)) == -1)
2055 return (-1); /* ENOMEM == errno */
2058 /* If the main thread is currently executing a signal event's
2059 * callback, and we are not the main thread, then we want to wait
2060 * until the callback is done before we mess with the event, or else
2061 * we can race on ev_ncalls and ev_pncalls below. */
2062 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2063 if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
2064 && !EVBASE_IN_THREAD(base)) {
2065 ++base->current_event_waiters;
2066 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2068 #endif
2070 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
2071 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
2072 if (ev->ev_events & (EV_READ|EV_WRITE))
2073 res = evmap_io_add(base, ev->ev_fd, ev);
2074 else if (ev->ev_events & EV_SIGNAL)
2075 res = evmap_signal_add(base, (int)ev->ev_fd, ev);
2076 if (res != -1)
2077 event_queue_insert(base, ev, EVLIST_INSERTED);
2078 if (res == 1) {
2079 /* evmap says we need to notify the main thread. */
2080 notify = 1;
2081 res = 0;
2082 }
2083 }
2085 /*
2086 * we should change the timeout state only if the previous event
2087 * addition succeeded.
2088 */
2089 if (res != -1 && tv != NULL) {
2090 struct timeval now;
2091 int common_timeout;
2093 /*
2094 * for persistent timeout events, we remember the
2095 * timeout value and re-add the event.
2096 *
2097 * If tv_is_absolute, this was already set.
2098 */
2099 if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
2100 ev->ev_io_timeout = *tv;
2102 /*
2103 * we already reserved memory above for the case where we
2104 * are not replacing an existing timeout.
2105 */
2106 if (ev->ev_flags & EVLIST_TIMEOUT) {
2107 /* XXX I believe this is needless. */
2108 if (min_heap_elt_is_top(ev))
2109 notify = 1;
2110 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2111 }
2113 /* Check if it is active due to a timeout. Rescheduling
2114 * this timeout before the callback can be executed
2115 * removes it from the active list. */
2116 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2117 (ev->ev_res & EV_TIMEOUT)) {
2118 if (ev->ev_events & EV_SIGNAL) {
2119 /* See if we are just active executing
2120 * this event in a loop
2121 */
2122 if (ev->ev_ncalls && ev->ev_pncalls) {
2123 /* Abort loop */
2124 *ev->ev_pncalls = 0;
2125 }
2126 }
2128 event_queue_remove(base, ev, EVLIST_ACTIVE);
2129 }
2131 gettime(base, &now);
2133 common_timeout = is_common_timeout(tv, base);
2134 if (tv_is_absolute) {
2135 ev->ev_timeout = *tv;
2136 } else if (common_timeout) {
2137 struct timeval tmp = *tv;
2138 tmp.tv_usec &= MICROSECONDS_MASK;
2139 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2140 ev->ev_timeout.tv_usec |=
2141 (tv->tv_usec & ~MICROSECONDS_MASK);
2142 } else {
2143 evutil_timeradd(&now, tv, &ev->ev_timeout);
2144 }
2146 event_debug((
2147 "event_add: timeout in %d seconds, call %p",
2148 (int)tv->tv_sec, ev->ev_callback));
2150 event_queue_insert(base, ev, EVLIST_TIMEOUT);
2151 if (common_timeout) {
2152 struct common_timeout_list *ctl =
2153 get_common_timeout_list(base, &ev->ev_timeout);
2154 if (ev == TAILQ_FIRST(&ctl->events)) {
2155 common_timeout_schedule(ctl, &now, ev);
2156 }
2157 } else {
2158 /* See if the earliest timeout is now earlier than it
2159 * was before: if so, we will need to tell the main
2160 * thread to wake up earlier than it would
2161 * otherwise. */
2162 if (min_heap_elt_is_top(ev))
2163 notify = 1;
2164 }
2165 }
2167 /* if we are not in the right thread, we need to wake up the loop */
2168 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2169 evthread_notify_base(base);
2171 _event_debug_note_add(ev);
2173 return (res);
2174 }
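/* Worked example (editorial): for a plain relative timeout, the code above
 * stores ev_timeout = now + tv.  If gettime() reports now = {1000, 900000}
 * and tv = {2, 200000}, evutil_timeradd() yields ev_timeout = {1003, 100000},
 * the microsecond overflow carrying into the seconds.  For a common timeout,
 * only the bits of tv_usec below MICROSECONDS_MASK take part in the addition;
 * the magic tag and queue index from tv->tv_usec are OR'd back in afterwards,
 * so the stored deadline still identifies its common-timeout queue. */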
2176 int
2177 event_del(struct event *ev)
2178 {
2179 int res;
2181 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2182 event_warnx("%s: event has no event_base set.", __func__);
2183 return -1;
2184 }
2186 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2188 res = event_del_internal(ev);
2190 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2192 return (res);
2193 }
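/* Usage sketch (editorial, with a hypothetical pending event `ev` created
 * elsewhere via event_new()):
 *
 *     if (event_del(ev) == -1)
 *         fprintf(stderr, "event_del failed\n");
 *     event_free(ev);
 *
 * Freeing afterwards is safe because event_del() does not return until the
 * event is no longer pending and, if another thread was running its callback,
 * until that callback has finished. */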
2195 /* Helper for event_del: always called with th_base_lock held. */
2196 static inline int
2197 event_del_internal(struct event *ev)
2198 {
2199 struct event_base *base;
2200 int res = 0, notify = 0;
2202 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2203 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2205 /* An event without a base has not been added */
2206 if (ev->ev_base == NULL)
2207 return (-1);
2209 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2211 /* If the main thread is currently executing this event's callback,
2212 * and we are not the main thread, then we want to wait until the
2213 * callback is done before we start removing the event. That way,
2214 * when this function returns, it will be safe to free the
2215 * user-supplied argument. */
2216 base = ev->ev_base;
2217 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2218 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2219 ++base->current_event_waiters;
2220 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2221 }
2222 #endif
2224 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2226 /* See if we are just active executing this event in a loop */
2227 if (ev->ev_events & EV_SIGNAL) {
2228 if (ev->ev_ncalls && ev->ev_pncalls) {
2229 /* Abort loop */
2230 *ev->ev_pncalls = 0;
2231 }
2232 }
2234 if (ev->ev_flags & EVLIST_TIMEOUT) {
2235 /* NOTE: We never need to notify the main thread because of a
2236 * deleted timeout event: all that could happen if we don't is
2237 * that the dispatch loop might wake up too early. But the
2238 * point of notifying the main thread _is_ to wake up the
2239 * dispatch loop early anyway, so we wouldn't gain anything by
2240 * doing it.
2241 */
2242 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2243 }
2245 if (ev->ev_flags & EVLIST_ACTIVE)
2246 event_queue_remove(base, ev, EVLIST_ACTIVE);
2248 if (ev->ev_flags & EVLIST_INSERTED) {
2249 event_queue_remove(base, ev, EVLIST_INSERTED);
2250 if (ev->ev_events & (EV_READ|EV_WRITE))
2251 res = evmap_io_del(base, ev->ev_fd, ev);
2252 else
2253 res = evmap_signal_del(base, (int)ev->ev_fd, ev);
2254 if (res == 1) {
2255 /* evmap says we need to notify the main thread. */
2256 notify = 1;
2257 res = 0;
2258 }
2259 }
2261 /* if we are not in the right thread, we need to wake up the loop */
2262 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2263 evthread_notify_base(base);
2265 _event_debug_note_del(ev);
2267 return (res);
2268 }
2270 void
2271 event_active(struct event *ev, int res, short ncalls)
2272 {
2273 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2274 event_warnx("%s: event has no event_base set.", __func__);
2275 return;
2276 }
2278 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2280 _event_debug_assert_is_setup(ev);
2282 event_active_nolock(ev, res, ncalls);
2284 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2285 }
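/* Usage sketch (editorial): activating an event from a thread other than the
 * one running the loop; `ev` is hypothetical and its base is assumed to have
 * been made notifiable (see evthread_make_base_notifiable() below).
 *
 *     event_active(ev, EV_READ, 0);
 *
 * The lock taken above plus the notify write guarantee that the loop thread
 * wakes up and runs the callback with res set to EV_READ. */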
2288 void
2289 event_active_nolock(struct event *ev, int res, short ncalls)
2290 {
2291 struct event_base *base;
2293 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2294 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2297 /* We get different kinds of events, add them together */
2298 if (ev->ev_flags & EVLIST_ACTIVE) {
2299 ev->ev_res |= res;
2300 return;
2301 }
2303 base = ev->ev_base;
2305 EVENT_BASE_ASSERT_LOCKED(base);
2307 ev->ev_res = res;
2309 if (ev->ev_pri < base->event_running_priority)
2310 base->event_continue = 1;
2312 if (ev->ev_events & EV_SIGNAL) {
2313 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2314 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2315 ++base->current_event_waiters;
2316 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2317 }
2318 #endif
2319 ev->ev_ncalls = ncalls;
2320 ev->ev_pncalls = NULL;
2321 }
2323 event_queue_insert(base, ev, EVLIST_ACTIVE);
2325 if (EVBASE_NEED_NOTIFY(base))
2326 evthread_notify_base(base);
2327 }
2329 void
2330 event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
2331 {
2332 memset(cb, 0, sizeof(struct deferred_cb));
2333 cb->cb = fn;
2334 cb->arg = arg;
2335 }
2337 void
2338 event_deferred_cb_cancel(struct deferred_cb_queue *queue,
2339 struct deferred_cb *cb)
2340 {
2341 if (!queue) {
2342 if (current_base)
2343 queue = &current_base->defer_queue;
2344 else
2345 return;
2346 }
2348 LOCK_DEFERRED_QUEUE(queue);
2349 if (cb->queued) {
2350 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
2351 --queue->active_count;
2352 cb->queued = 0;
2353 }
2354 UNLOCK_DEFERRED_QUEUE(queue);
2355 }
2357 void
2358 event_deferred_cb_schedule(struct deferred_cb_queue *queue,
2359 struct deferred_cb *cb)
2360 {
2361 if (!queue) {
2362 if (current_base)
2363 queue = &current_base->defer_queue;
2364 else
2365 return;
2366 }
2368 LOCK_DEFERRED_QUEUE(queue);
2369 if (!cb->queued) {
2370 cb->queued = 1;
2371 TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
2372 ++queue->active_count;
2373 if (queue->notify_fn)
2374 queue->notify_fn(queue, queue->notify_arg);
2375 }
2376 UNLOCK_DEFERRED_QUEUE(queue);
2377 }
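/* Usage sketch (editorial): these deferred-callback helpers are internal
 * (defer-internal.h); bufferevents use them roughly as below.  The callback
 * type is assumed to be void (*)(struct deferred_cb *, void *), and
 * process() and my_arg are hypothetical.
 *
 *     static void my_deferred(struct deferred_cb *cb, void *arg)
 *     {
 *         (void)cb;
 *         process(arg);        <- runs later, from the event loop
 *     }
 *
 *     struct deferred_cb dcb;
 *     event_deferred_cb_init(&dcb, my_deferred, my_arg);
 *     event_deferred_cb_schedule(&base->defer_queue, &dcb);
 */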
2379 static int
2380 timeout_next(struct event_base *base, struct timeval **tv_p)
2381 {
2382 /* Caller must hold th_base_lock */
2383 struct timeval now;
2384 struct event *ev;
2385 struct timeval *tv = *tv_p;
2386 int res = 0;
2388 ev = min_heap_top(&base->timeheap);
2390 if (ev == NULL) {
2391 /* if no time-based events are active wait for I/O */
2392 *tv_p = NULL;
2393 goto out;
2394 }
2396 if (gettime(base, &now) == -1) {
2397 res = -1;
2398 goto out;
2399 }
2401 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
2402 evutil_timerclear(tv);
2403 goto out;
2404 }
2406 evutil_timersub(&ev->ev_timeout, &now, tv);
2408 EVUTIL_ASSERT(tv->tv_sec >= 0);
2409 EVUTIL_ASSERT(tv->tv_usec >= 0);
2410 event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
2412 out:
2413 return (res);
2414 }
2416 /*
2417 * Determines if the time is running backwards by comparing the current time
2418 * against the last time we checked. Not needed when using clock monotonic.
2419 * If time is running backwards, we adjust the firing time of every event by
2420 * the amount that time seems to have jumped.
2421 */
2422 static void
2423 timeout_correct(struct event_base *base, struct timeval *tv)
2424 {
2425 /* Caller must hold th_base_lock. */
2426 struct event **pev;
2427 unsigned int size;
2428 struct timeval off;
2429 int i;
2431 if (use_monotonic)
2432 return;
2434 /* Check if time is running backwards */
2435 gettime(base, tv);
2437 if (evutil_timercmp(tv, &base->event_tv, >=)) {
2438 base->event_tv = *tv;
2439 return;
2440 }
2442 event_debug(("%s: time is running backwards, corrected",
2443 __func__));
2444 evutil_timersub(&base->event_tv, tv, &off);
2446 /*
2447 * We can modify the key element of the node without destroying
2448 * the minheap property, because we change every element.
2449 */
2450 pev = base->timeheap.p;
2451 size = base->timeheap.n;
2452 for (; size-- > 0; ++pev) {
2453 struct timeval *ev_tv = &(**pev).ev_timeout;
2454 evutil_timersub(ev_tv, &off, ev_tv);
2455 }
2456 for (i=0; i<base->n_common_timeouts; ++i) {
2457 struct event *ev;
2458 struct common_timeout_list *ctl =
2459 base->common_timeout_queues[i];
2460 TAILQ_FOREACH(ev, &ctl->events,
2461 ev_timeout_pos.ev_next_with_common_timeout) {
2462 struct timeval *ev_tv = &ev->ev_timeout;
2463 ev_tv->tv_usec &= MICROSECONDS_MASK;
2464 evutil_timersub(ev_tv, &off, ev_tv);
2465 ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
2466 (i<<COMMON_TIMEOUT_IDX_SHIFT);
2467 }
2468 }
2470 /* Now remember what the new time turned out to be. */
2471 base->event_tv = *tv;
2472 }
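/* Worked example (editorial): suppose gettime() now reports 12:00:00 while
 * base->event_tv still holds 12:00:05, i.e. the wall clock jumped back by
 * off = 5 seconds.  The loops above subtract those 5 seconds from every
 * pending ev_timeout, in the min-heap and in each common-timeout queue, so an
 * event that was due in 3 seconds of real time before the jump is still due
 * in 3 seconds rather than in 8. */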
2474 /* Activate every event whose timeout has elapsed. */
2475 static void
2476 timeout_process(struct event_base *base)
2477 {
2478 /* Caller must hold lock. */
2479 struct timeval now;
2480 struct event *ev;
2482 if (min_heap_empty(&base->timeheap)) {
2483 return;
2484 }
2486 gettime(base, &now);
2488 while ((ev = min_heap_top(&base->timeheap))) {
2489 if (evutil_timercmp(&ev->ev_timeout, &now, >))
2490 break;
2492 /* delete this event from the I/O queues */
2493 event_del_internal(ev);
2495 event_debug(("timeout_process: call %p",
2496 ev->ev_callback));
2497 event_active_nolock(ev, EV_TIMEOUT, 1);
2498 }
2499 }
2501 /* Remove 'ev' from 'queue' (EVLIST_...) in base. */
2502 static void
2503 event_queue_remove(struct event_base *base, struct event *ev, int queue)
2504 {
2505 EVENT_BASE_ASSERT_LOCKED(base);
2507 if (!(ev->ev_flags & queue)) {
2508 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
2509 ev, EV_SOCK_ARG(ev->ev_fd), queue);
2510 return;
2511 }
2513 if (~ev->ev_flags & EVLIST_INTERNAL)
2514 base->event_count--;
2516 ev->ev_flags &= ~queue;
2517 switch (queue) {
2518 case EVLIST_INSERTED:
2519 TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
2520 break;
2521 case EVLIST_ACTIVE:
2522 base->event_count_active--;
2523 TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
2524 ev, ev_active_next);
2525 break;
2526 case EVLIST_TIMEOUT:
2527 if (is_common_timeout(&ev->ev_timeout, base)) {
2528 struct common_timeout_list *ctl =
2529 get_common_timeout_list(base, &ev->ev_timeout);
2530 TAILQ_REMOVE(&ctl->events, ev,
2531 ev_timeout_pos.ev_next_with_common_timeout);
2532 } else {
2533 min_heap_erase(&base->timeheap, ev);
2534 }
2535 break;
2536 default:
2537 event_errx(1, "%s: unknown queue %x", __func__, queue);
2538 }
2539 }
2541 /* Add 'ev' to the common timeout list 'ctl', keeping the list sorted by timeout. */
2542 static void
2543 insert_common_timeout_inorder(struct common_timeout_list *ctl,
2544 struct event *ev)
2545 {
2546 struct event *e;
2547 /* By all logic, we should just be able to append 'ev' to the end of
2548 * ctl->events, since the timeout on each 'ev' is set to {the common
2549 * timeout} + {the time when we add the event}, and so the events
2550 * should arrive in order of their timeouts. But just in case
2551 * there's some wacky threading issue going on, we do a search from
2552 * the end of the list to find the right insertion point.
2553 */
2554 TAILQ_FOREACH_REVERSE(e, &ctl->events,
2555 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
2556 /* This timercmp is a little sneaky, since both ev and e have
2557 * magic values in tv_usec. Fortunately, they ought to have
2558 * the _same_ magic values in tv_usec. Let's assert for that.
2559 */
2560 EVUTIL_ASSERT(
2561 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
2562 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
2563 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
2564 ev_timeout_pos.ev_next_with_common_timeout);
2565 return;
2566 }
2567 }
2568 TAILQ_INSERT_HEAD(&ctl->events, ev,
2569 ev_timeout_pos.ev_next_with_common_timeout);
2570 }
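/* Usage sketch (editorial): applications normally reach the common-timeout
 * path through the public API rather than by crafting magic timevals;
 * `base` and `ev` are hypothetical.
 *
 *     struct timeval one_minute = { 60, 0 };
 *     const struct timeval *common =
 *         event_base_init_common_timeout(base, &one_minute);
 *     if (common)
 *         event_add(ev, common);
 *
 * Every event added with `common` lands on the same sorted list above instead
 * of the min-heap, which keeps large numbers of identical timeouts cheap. */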
2572 static void
2573 event_queue_insert(struct event_base *base, struct event *ev, int queue)
2574 {
2575 EVENT_BASE_ASSERT_LOCKED(base);
2577 if (ev->ev_flags & queue) {
2578 /* Double insertion is possible for active events */
2579 if (queue & EVLIST_ACTIVE)
2580 return;
2582 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on queue %x", __func__,
2583 ev, EV_SOCK_ARG(ev->ev_fd), queue);
2584 return;
2585 }
2587 if (~ev->ev_flags & EVLIST_INTERNAL)
2588 base->event_count++;
2590 ev->ev_flags |= queue;
2591 switch (queue) {
2592 case EVLIST_INSERTED:
2593 TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
2594 break;
2595 case EVLIST_ACTIVE:
2596 base->event_count_active++;
2597 TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
2598 ev,ev_active_next);
2599 break;
2600 case EVLIST_TIMEOUT: {
2601 if (is_common_timeout(&ev->ev_timeout, base)) {
2602 struct common_timeout_list *ctl =
2603 get_common_timeout_list(base, &ev->ev_timeout);
2604 insert_common_timeout_inorder(ctl, ev);
2605 } else
2606 min_heap_push(&base->timeheap, ev);
2607 break;
2608 }
2609 default:
2610 event_errx(1, "%s: unknown queue %x", __func__, queue);
2611 }
2612 }
2614 /* Functions for debugging */
2616 const char *
2617 event_get_version(void)
2618 {
2619 return (_EVENT_VERSION);
2620 }
2622 ev_uint32_t
2623 event_get_version_number(void)
2624 {
2625 return (_EVENT_NUMERIC_VERSION);
2626 }
2628 /*
2629 * No thread-safe interface needed - the information should be the same
2630 * for all threads.
2631 */
2633 const char *
2634 event_get_method(void)
2635 {
2636 return (current_base->evsel->name);
2637 }
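/* Usage sketch (editorial): reporting the library version and the backend in
 * use.  event_get_method() reads current_base, so it assumes the legacy
 * event_init() has been called (or current_base has otherwise been set).
 *
 *     printf("libevent %s (0x%08x), method %s\n",
 *         event_get_version(),
 *         (unsigned)event_get_version_number(),
 *         event_get_method());
 */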
2639 #ifndef _EVENT_DISABLE_MM_REPLACEMENT
2640 static void *(*_mm_malloc_fn)(size_t sz) = NULL;
2641 static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
2642 static void (*_mm_free_fn)(void *p) = NULL;
2644 void *
2645 event_mm_malloc_(size_t sz)
2646 {
2647 if (_mm_malloc_fn)
2648 return _mm_malloc_fn(sz);
2649 else
2650 return malloc(sz);
2651 }
2653 void *
2654 event_mm_calloc_(size_t count, size_t size)
2655 {
2656 if (_mm_malloc_fn) {
2657 size_t sz = count * size;
2658 void *p = _mm_malloc_fn(sz);
2659 if (p)
2660 memset(p, 0, sz);
2661 return p;
2662 } else
2663 return calloc(count, size);
2664 }
2666 char *
2667 event_mm_strdup_(const char *str)
2668 {
2669 if (_mm_malloc_fn) {
2670 size_t ln = strlen(str);
2671 void *p = _mm_malloc_fn(ln+1);
2672 if (p)
2673 memcpy(p, str, ln+1);
2674 return p;
2675 } else
2676 #ifdef WIN32
2677 return _strdup(str);
2678 #else
2679 return strdup(str);
2680 #endif
2681 }
2683 void *
2684 event_mm_realloc_(void *ptr, size_t sz)
2685 {
2686 if (_mm_realloc_fn)
2687 return _mm_realloc_fn(ptr, sz);
2688 else
2689 return realloc(ptr, sz);
2690 }
2692 void
2693 event_mm_free_(void *ptr)
2694 {
2695 if (_mm_free_fn)
2696 _mm_free_fn(ptr);
2697 else
2698 free(ptr);
2699 }
2701 void
2702 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
2703 void *(*realloc_fn)(void *ptr, size_t sz),
2704 void (*free_fn)(void *ptr))
2705 {
2706 _mm_malloc_fn = malloc_fn;
2707 _mm_realloc_fn = realloc_fn;
2708 _mm_free_fn = free_fn;
2709 }
2710 #endif
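/* Usage sketch (editorial): installing replacement allocators.  This has to
 * happen before libevent allocates anything; the wrappers here are
 * hypothetical stand-ins for an application's own allocator.
 *
 *     static void *my_malloc(size_t sz) { return malloc(sz); }
 *     static void *my_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *     static void my_free(void *p) { free(p); }
 *
 *     event_set_mem_functions(my_malloc, my_realloc, my_free);
 */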
2712 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2713 static void
2714 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
2715 {
2716 ev_uint64_t msg;
2717 ev_ssize_t r;
2718 struct event_base *base = arg;
2720 r = read(fd, (void*) &msg, sizeof(msg));
2721 if (r<0 && errno != EAGAIN) {
2722 event_sock_warn(fd, "Error reading from eventfd");
2723 }
2724 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2725 base->is_notify_pending = 0;
2726 EVBASE_RELEASE_LOCK(base, th_base_lock);
2727 }
2728 #endif
2730 static void
2731 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
2732 {
2733 unsigned char buf[1024];
2734 struct event_base *base = arg;
2735 #ifdef WIN32
2736 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
2737 ;
2738 #else
2739 while (read(fd, (char*)buf, sizeof(buf)) > 0)
2740 ;
2741 #endif
2743 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2744 base->is_notify_pending = 0;
2745 EVBASE_RELEASE_LOCK(base, th_base_lock);
2746 }
2748 int
2749 evthread_make_base_notifiable(struct event_base *base)
2750 {
2751 void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
2752 int (*notify)(struct event_base *) = evthread_notify_base_default;
2754 /* XXXX grab the lock here? */
2755 if (!base)
2756 return -1;
2758 if (base->th_notify_fd[0] >= 0)
2759 return 0;
2761 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2762 #ifndef EFD_CLOEXEC
2763 #define EFD_CLOEXEC 0
2764 #endif
2765 base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
2766 if (base->th_notify_fd[0] >= 0) {
2767 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2768 notify = evthread_notify_base_eventfd;
2769 cb = evthread_notify_drain_eventfd;
2770 }
2771 #endif
2772 #if defined(_EVENT_HAVE_PIPE)
2773 if (base->th_notify_fd[0] < 0) {
2774 if ((base->evsel->features & EV_FEATURE_FDS)) {
2775 if (pipe(base->th_notify_fd) < 0) {
2776 event_warn("%s: pipe", __func__);
2777 } else {
2778 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2779 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2780 }
2781 }
2782 }
2783 #endif
2785 #ifdef WIN32
2786 #define LOCAL_SOCKETPAIR_AF AF_INET
2787 #else
2788 #define LOCAL_SOCKETPAIR_AF AF_UNIX
2789 #endif
2790 if (base->th_notify_fd[0] < 0) {
2791 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
2792 base->th_notify_fd) == -1) {
2793 event_sock_warn(-1, "%s: socketpair", __func__);
2794 return (-1);
2795 } else {
2796 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2797 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2798 }
2799 }
2801 evutil_make_socket_nonblocking(base->th_notify_fd[0]);
2803 base->th_notify_fn = notify;
2805 /*
2806 Making the second socket nonblocking is a bit subtle, given that we
2807 ignore any EAGAIN returns when writing to it, and you don't usually
2808 do that for a nonblocking socket. But if the kernel gives us EAGAIN,
2809 then there's no need to add any more data to the buffer, since
2810 the main thread is already either about to wake up and drain it,
2811 or woken up and in the process of draining it.
2812 */
2813 if (base->th_notify_fd[1] > 0)
2814 evutil_make_socket_nonblocking(base->th_notify_fd[1]);
2816 /* prepare an event that we can use for wakeup */
2817 event_assign(&base->th_notify, base, base->th_notify_fd[0],
2818 EV_READ|EV_PERSIST, cb, base);
2820 /* we need to mark this as internal event */
2821 base->th_notify.ev_flags |= EVLIST_INTERNAL;
2822 event_priority_set(&base->th_notify, 0);
2824 return event_add(&base->th_notify, NULL);
2825 }
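/* Usage sketch (editorial): applications rarely call
 * evthread_make_base_notifiable() themselves.  Enabling threading support
 * before creating the base normally arranges it, e.g. with the pthreads
 * helper (assumes libevent was built and linked with libevent_pthreads):
 *
 *     evthread_use_pthreads();
 *     struct event_base *base = event_base_new();
 *
 * After that, event_add()/event_del()/event_active() on this base from other
 * threads can wake the dispatch loop through th_notify_fd as set up above. */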
2827 void
2828 event_base_dump_events(struct event_base *base, FILE *output)
2829 {
2830 struct event *e;
2831 int i;
2832 fprintf(output, "Inserted events:\n");
2833 TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
2834 fprintf(output, " %p [fd "EV_SOCK_FMT"]%s%s%s%s%s\n",
2835 (void*)e, EV_SOCK_ARG(e->ev_fd),
2836 (e->ev_events&EV_READ)?" Read":"",
2837 (e->ev_events&EV_WRITE)?" Write":"",
2838 (e->ev_events&EV_SIGNAL)?" Signal":"",
2839 (e->ev_events&EV_TIMEOUT)?" Timeout":"",
2840 (e->ev_events&EV_PERSIST)?" Persist":"");
2841 }
2843 for (i = 0; i < base->nactivequeues; ++i) {
2844 if (TAILQ_EMPTY(&base->activequeues[i]))
2845 continue;
2846 fprintf(output, "Active events [priority %d]:\n", i);
2847 TAILQ_FOREACH(e, &base->activequeues[i], ev_active_next) {
2848 fprintf(output, " %p [fd "EV_SOCK_FMT"]%s%s%s%s\n",
2849 (void*)e, EV_SOCK_ARG(e->ev_fd),
2850 (e->ev_res&EV_READ)?" Read active":"",
2851 (e->ev_res&EV_WRITE)?" Write active":"",
2852 (e->ev_res&EV_SIGNAL)?" Signal active":"",
2853 (e->ev_res&EV_TIMEOUT)?" Timeout active":"");
2854 }
2855 }
2856 }
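/* Usage sketch (editorial): dumping the event tables for debugging, with a
 * hypothetical `base`.
 *
 *     event_base_dump_events(base, stderr);
 */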
2858 void
2859 event_base_add_virtual(struct event_base *base)
2860 {
2861 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2862 base->virtual_event_count++;
2863 EVBASE_RELEASE_LOCK(base, th_base_lock);
2864 }
2866 void
2867 event_base_del_virtual(struct event_base *base)
2868 {
2869 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2870 EVUTIL_ASSERT(base->virtual_event_count > 0);
2871 base->virtual_event_count--;
2872 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
2873 evthread_notify_base(base);
2874 EVBASE_RELEASE_LOCK(base, th_base_lock);
2875 }
2877 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2878 int
2879 event_global_setup_locks_(const int enable_locks)
2880 {
2881 #ifndef _EVENT_DISABLE_DEBUG_MODE
2882 EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
2883 #endif
2884 if (evsig_global_setup_locks_(enable_locks) < 0)
2885 return -1;
2886 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
2887 return -1;
2888 return 0;
2889 }
2890 #endif
2892 void
2893 event_base_assert_ok(struct event_base *base)
2894 {
2895 int i;
2896 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2897 evmap_check_integrity(base);
2899 /* Check the heap property */
2900 for (i = 1; i < (int)base->timeheap.n; ++i) {
2901 int parent = (i - 1) / 2;
2902 struct event *ev, *p_ev;
2903 ev = base->timeheap.p[i];
2904 p_ev = base->timeheap.p[parent];
2905 EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
2906 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
2907 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
2908 }
2910 /* Check that the common timeouts are fine */
2911 for (i = 0; i < base->n_common_timeouts; ++i) {
2912 struct common_timeout_list *ctl = base->common_timeout_queues[i];
2913 struct event *last=NULL, *ev;
2914 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
2915 if (last)
2916 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
2917 EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
2918 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
2919 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
2920 last = ev;
2921 }
2922 }
2924 EVBASE_RELEASE_LOCK(base, th_base_lock);
2925 }