MiniDLNA update: 1.0.19.1 to 1.0.20
[tomato.git] / release / src / router / libevent / event.c
blob 41c7d8f4fd86023948944fc12c53c96e69b9161f
1 /*
2 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "event2/event-config.h"
29 #ifdef WIN32
30 #include <winsock2.h>
31 #define WIN32_LEAN_AND_MEAN
32 #include <windows.h>
33 #undef WIN32_LEAN_AND_MEAN
34 #endif
35 #include <sys/types.h>
36 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
37 #include <sys/time.h>
38 #endif
39 #include <sys/queue.h>
40 #ifdef _EVENT_HAVE_SYS_SOCKET_H
41 #include <sys/socket.h>
42 #endif
43 #include <stdio.h>
44 #include <stdlib.h>
45 #ifdef _EVENT_HAVE_UNISTD_H
46 #include <unistd.h>
47 #endif
48 #ifdef _EVENT_HAVE_SYS_EVENTFD_H
49 #include <sys/eventfd.h>
50 #endif
51 #include <ctype.h>
52 #include <errno.h>
53 #include <signal.h>
54 #include <string.h>
55 #include <time.h>
57 #include "event2/event.h"
58 #include "event2/event_struct.h"
59 #include "event2/event_compat.h"
60 #include "event-internal.h"
61 #include "defer-internal.h"
62 #include "evthread-internal.h"
63 #include "event2/thread.h"
64 #include "event2/util.h"
65 #include "log-internal.h"
66 #include "evmap-internal.h"
67 #include "iocp-internal.h"
68 #include "changelist-internal.h"
69 #include "ht-internal.h"
70 #include "util-internal.h"
72 #ifdef _EVENT_HAVE_EVENT_PORTS
73 extern const struct eventop evportops;
74 #endif
75 #ifdef _EVENT_HAVE_SELECT
76 extern const struct eventop selectops;
77 #endif
78 #ifdef _EVENT_HAVE_POLL
79 extern const struct eventop pollops;
80 #endif
81 #ifdef _EVENT_HAVE_EPOLL
82 extern const struct eventop epollops;
83 #endif
84 #ifdef _EVENT_HAVE_WORKING_KQUEUE
85 extern const struct eventop kqops;
86 #endif
87 #ifdef _EVENT_HAVE_DEVPOLL
88 extern const struct eventop devpollops;
89 #endif
90 #ifdef WIN32
91 extern const struct eventop win32ops;
92 #endif
94 /* Array of backends in order of preference. */
95 static const struct eventop *eventops[] = {
96 #ifdef _EVENT_HAVE_EVENT_PORTS
97 &evportops,
98 #endif
99 #ifdef _EVENT_HAVE_WORKING_KQUEUE
100 &kqops,
101 #endif
102 #ifdef _EVENT_HAVE_EPOLL
103 &epollops,
104 #endif
105 #ifdef _EVENT_HAVE_DEVPOLL
106 &devpollops,
107 #endif
108 #ifdef _EVENT_HAVE_POLL
109 &pollops,
110 #endif
111 #ifdef _EVENT_HAVE_SELECT
112 &selectops,
113 #endif
114 #ifdef WIN32
115 &win32ops,
116 #endif
117 NULL
120 /* Global state; deprecated */
121 struct event_base *event_global_current_base_ = NULL;
122 #define current_base event_global_current_base_
124 /* Global state */
126 static int use_monotonic;
128 /* Prototypes */
129 static inline int event_add_internal(struct event *ev,
130 const struct timeval *tv, int tv_is_absolute);
131 static inline int event_del_internal(struct event *ev);
133 static void event_queue_insert(struct event_base *, struct event *, int);
134 static void event_queue_remove(struct event_base *, struct event *, int);
135 static int event_haveevents(struct event_base *);
137 static int event_process_active(struct event_base *);
139 static int timeout_next(struct event_base *, struct timeval **);
140 static void timeout_process(struct event_base *);
141 static void timeout_correct(struct event_base *, struct timeval *);
143 static inline void event_signal_closure(struct event_base *, struct event *ev);
144 static inline void event_persist_closure(struct event_base *, struct event *ev);
146 static int evthread_notify_base(struct event_base *base);
148 #ifndef _EVENT_DISABLE_DEBUG_MODE
149 /* These functions implement a hashtable of which 'struct event *' structures
150 * have been setup or added. We don't want to trust the content of the struct
151 * event itself, since we're trying to work through cases where an event gets
152 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
155 struct event_debug_entry {
156 HT_ENTRY(event_debug_entry) node;
157 const struct event *ptr;
158 unsigned added : 1;
161 static inline unsigned
162 hash_debug_entry(const struct event_debug_entry *e)
164 /* We need to do this silliness to convince compilers that we
165 * honestly mean to cast e->ptr to an integer, and discard any
166 * part of it that doesn't fit in an unsigned.
168 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
169 /* Our hashtable implementation is pretty sensitive to low bits,
170 * and every struct event is over 64 bytes in size, so we can
171 * just say >>6. */
172 return (u >> 6);
175 static inline int
176 eq_debug_entry(const struct event_debug_entry *a,
177 const struct event_debug_entry *b)
179 return a->ptr == b->ptr;
182 int _event_debug_mode_on = 0;
183 /* Set if it's too late to enable event_debug_mode. */
184 static int event_debug_mode_too_late = 0;
185 static void *_event_debug_map_lock = NULL;
186 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
187 HT_INITIALIZER();
189 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
190 eq_debug_entry)
191 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
192 eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
194 /* Macro: record that ev is now setup (that is, ready for an add) */
195 #define _event_debug_note_setup(ev) do { \
196 if (_event_debug_mode_on) { \
197 struct event_debug_entry *dent,find; \
198 find.ptr = (ev); \
199 EVLOCK_LOCK(_event_debug_map_lock, 0); \
200 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
201 if (dent) { \
202 dent->added = 0; \
203 } else { \
204 dent = mm_malloc(sizeof(*dent)); \
205 if (!dent) \
206 event_err(1, \
207 "Out of memory in debugging code"); \
208 dent->ptr = (ev); \
209 dent->added = 0; \
210 HT_INSERT(event_debug_map, &global_debug_map, dent); \
212 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
214 event_debug_mode_too_late = 1; \
215 } while (0)
216 /* Macro: record that ev is no longer setup */
217 #define _event_debug_note_teardown(ev) do { \
218 if (_event_debug_mode_on) { \
219 struct event_debug_entry *dent,find; \
220 find.ptr = (ev); \
221 EVLOCK_LOCK(_event_debug_map_lock, 0); \
222 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
223 if (dent) \
224 mm_free(dent); \
225 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
227 event_debug_mode_too_late = 1; \
228 } while (0)
229 /* Macro: record that ev is now added */
230 #define _event_debug_note_add(ev) do { \
231 if (_event_debug_mode_on) { \
232 struct event_debug_entry *dent,find; \
233 find.ptr = (ev); \
234 EVLOCK_LOCK(_event_debug_map_lock, 0); \
235 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
236 if (dent) { \
237 dent->added = 1; \
238 } else { \
239 event_errx(_EVENT_ERR_ABORT, \
240 "%s: noting an add on a non-setup event %p", \
241 __func__, (ev)); \
243 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
245 event_debug_mode_too_late = 1; \
246 } while (0)
247 /* Macro: record that ev is no longer added */
248 #define _event_debug_note_del(ev) do { \
249 if (_event_debug_mode_on) { \
250 struct event_debug_entry *dent,find; \
251 find.ptr = (ev); \
252 EVLOCK_LOCK(_event_debug_map_lock, 0); \
253 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
254 if (dent) { \
255 dent->added = 0; \
256 } else { \
257 event_errx(_EVENT_ERR_ABORT, \
258 "%s: noting a del on a non-setup event %p", \
259 __func__, (ev)); \
261 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
263 event_debug_mode_too_late = 1; \
264 } while (0)
265 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
266 #define _event_debug_assert_is_setup(ev) do { \
267 if (_event_debug_mode_on) { \
268 struct event_debug_entry *dent,find; \
269 find.ptr = (ev); \
270 EVLOCK_LOCK(_event_debug_map_lock, 0); \
271 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
272 if (!dent) { \
273 event_errx(_EVENT_ERR_ABORT, \
274 "%s called on a non-initialized event %p", \
275 __func__, (ev)); \
277 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
279 } while (0)
280 /* Macro: assert that ev is not added (i.e., okay to tear down or set
281 * up again) */
282 #define _event_debug_assert_not_added(ev) do { \
283 if (_event_debug_mode_on) { \
284 struct event_debug_entry *dent,find; \
285 find.ptr = (ev); \
286 EVLOCK_LOCK(_event_debug_map_lock, 0); \
287 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
288 if (dent && dent->added) { \
289 event_errx(_EVENT_ERR_ABORT, \
290 "%s called on an already added event %p", \
291 __func__, (ev)); \
293 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
295 } while (0)
296 #else
297 #define _event_debug_note_setup(ev) \
298 ((void)0)
299 #define _event_debug_note_teardown(ev) \
300 ((void)0)
301 #define _event_debug_note_add(ev) \
302 ((void)0)
303 #define _event_debug_note_del(ev) \
304 ((void)0)
305 #define _event_debug_assert_is_setup(ev) \
306 ((void)0)
307 #define _event_debug_assert_not_added(ev) \
308 ((void)0)
309 #endif
311 #define EVENT_BASE_ASSERT_LOCKED(base) \
312 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
314 /* The first time this function is called, it sets use_monotonic to 1
315 * if we have a clock function that supports monotonic time */
316 static void
317 detect_monotonic(void)
319 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
320 struct timespec ts;
321 static int use_monotonic_initialized = 0;
323 if (use_monotonic_initialized)
324 return;
326 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
327 use_monotonic = 1;
329 use_monotonic_initialized = 1;
330 #endif
333 /* How often (in seconds) do we check for changes in wall clock time relative
334 * to monotonic time? Set this to -1 for 'never.' */
335 #define CLOCK_SYNC_INTERVAL -1
337 /** Set 'tp' to the current time according to 'base'. We must hold the lock
338 * on 'base'. If there is a cached time, return it. Otherwise, use
339 * clock_gettime or gettimeofday as appropriate to find out the right time.
340 * Return 0 on success, -1 on failure.
342 static int
343 gettime(struct event_base *base, struct timeval *tp)
345 EVENT_BASE_ASSERT_LOCKED(base);
347 if (base->tv_cache.tv_sec) {
348 *tp = base->tv_cache;
349 return (0);
352 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
353 if (use_monotonic) {
354 struct timespec ts;
356 if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
357 return (-1);
359 tp->tv_sec = ts.tv_sec;
360 tp->tv_usec = ts.tv_nsec / 1000;
361 if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
362 < ts.tv_sec) {
363 struct timeval tv;
364 evutil_gettimeofday(&tv,NULL);
365 evutil_timersub(&tv, tp, &base->tv_clock_diff);
366 base->last_updated_clock_diff = ts.tv_sec;
369 return (0);
371 #endif
373 return (evutil_gettimeofday(tp, NULL));
377 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
379 int r;
380 if (!base) {
381 base = current_base;
382 if (!current_base)
383 return evutil_gettimeofday(tv, NULL);
386 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
387 if (base->tv_cache.tv_sec == 0) {
388 r = evutil_gettimeofday(tv, NULL);
389 } else {
390 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
391 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
392 #else
393 *tv = base->tv_cache;
394 #endif
395 r = 0;
397 EVBASE_RELEASE_LOCK(base, th_base_lock);
398 return r;
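/* [Editor's note] Minimal usage sketch, not part of libevent's event.c: how
 * a callback might prefer event_base_gettimeofday_cached() over a direct
 * gettimeofday() so it reuses the loop's cached time when one is available.
 * The name timestamp_cb and the printf are illustrative. */
#if 0
#include <event2/event.h>
#include <stdio.h>

static void
timestamp_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;

	/* Uses the cached time if set; otherwise falls back to
	 * evutil_gettimeofday(), as implemented above. */
	event_base_gettimeofday_cached(base, &now);
	printf("callback at %ld.%06ld\n",
	    (long)now.tv_sec, (long)now.tv_usec);
}
#endif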
401 /** Make 'base' have no current cached time. */
402 static inline void
403 clear_time_cache(struct event_base *base)
405 base->tv_cache.tv_sec = 0;
408 /** Replace the cached time in 'base' with the current time. */
409 static inline void
410 update_time_cache(struct event_base *base)
412 base->tv_cache.tv_sec = 0;
413 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
414 gettime(base, &base->tv_cache);
417 struct event_base *
418 event_init(void)
420 struct event_base *base = event_base_new_with_config(NULL);
422 if (base == NULL) {
423 event_errx(1, "%s: Unable to construct event_base", __func__);
424 return NULL;
427 current_base = base;
429 return (base);
432 struct event_base *
433 event_base_new(void)
435 struct event_base *base = NULL;
436 struct event_config *cfg = event_config_new();
437 if (cfg) {
438 base = event_base_new_with_config(cfg);
439 event_config_free(cfg);
441 return base;
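/* [Editor's note] A hypothetical sketch, not part of this file: building a
 * base from an explicit event_config, which is what event_base_new() above
 * does with an empty config. The avoided method and required feature are
 * arbitrary examples. */
#if 0
#include <event2/event.h>

static struct event_base *
make_configured_base(void)
{
	struct event_config *cfg = event_config_new();
	struct event_base *base = NULL;

	if (!cfg)
		return NULL;
	event_config_avoid_method(cfg, "select");          /* skip select */
	event_config_require_features(cfg, EV_FEATURE_O1); /* want O(1) add/del */
	base = event_base_new_with_config(cfg);   /* NULL if no backend fits */
	event_config_free(cfg);
	return base;
}
#endif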
444 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
445 * avoid. */
446 static int
447 event_config_is_avoided_method(const struct event_config *cfg,
448 const char *method)
450 struct event_config_entry *entry;
452 TAILQ_FOREACH(entry, &cfg->entries, next) {
453 if (entry->avoid_method != NULL &&
454 strcmp(entry->avoid_method, method) == 0)
455 return (1);
458 return (0);
461 /** Return true iff 'method' is disabled according to the environment. */
462 static int
463 event_is_method_disabled(const char *name)
465 char environment[64];
466 int i;
468 evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
469 for (i = 8; environment[i] != '\0'; ++i)
470 environment[i] = EVUTIL_TOUPPER(environment[i]);
471 /* Note that evutil_getenv() ignores the environment entirely if
472 * we're setuid */
473 return (evutil_getenv(environment) != NULL);
477 event_base_get_features(const struct event_base *base)
479 return base->evsel->features;
482 void
483 event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
485 memset(cb, 0, sizeof(struct deferred_cb_queue));
486 TAILQ_INIT(&cb->deferred_cb_list);
489 /** Helper for the deferred_cb queue: wake up the event base. */
490 static void
491 notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
493 struct event_base *base = baseptr;
494 if (EVBASE_NEED_NOTIFY(base))
495 evthread_notify_base(base);
498 struct deferred_cb_queue *
499 event_base_get_deferred_cb_queue(struct event_base *base)
501 return base ? &base->defer_queue : NULL;
504 void
505 event_enable_debug_mode(void)
507 #ifndef _EVENT_DISABLE_DEBUG_MODE
508 if (_event_debug_mode_on)
509 event_errx(1, "%s was called twice!", __func__);
510 if (event_debug_mode_too_late)
511 event_errx(1, "%s must be called *before* creating any events "
512 "or event_bases",__func__);
514 _event_debug_mode_on = 1;
516 HT_INIT(event_debug_map, &global_debug_map);
518 EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
519 #endif
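/* [Editor's note] Illustrative only: event_enable_debug_mode() must run
 * before any event or event_base exists, which is what the
 * event_debug_mode_too_late check above enforces. */
#if 0
#include <event2/event.h>

int
main(void)
{
	struct event_base *base;

	event_enable_debug_mode();      /* first, or event_errx() aborts */
	base = event_base_new();
	/* ... create, add, and dispatch events ... */
	event_base_free(base);
	return 0;
}
#endif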
522 #if 0
523 void
524 event_disable_debug_mode(void)
526 struct event_debug_entry **ent, *victim;
528 EVLOCK_LOCK(_event_debug_map_lock, 0);
529 for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
530 victim = *ent;
531 ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
532 mm_free(victim);
534 HT_CLEAR(event_debug_map, &global_debug_map);
535 EVLOCK_UNLOCK(_event_debug_map_lock , 0);
537 #endif
539 struct event_base *
540 event_base_new_with_config(const struct event_config *cfg)
542 int i;
543 struct event_base *base;
544 int should_check_environment;
546 #ifndef _EVENT_DISABLE_DEBUG_MODE
547 event_debug_mode_too_late = 1;
548 if (_event_debug_mode_on && !_event_debug_map_lock) {
549 EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
551 #endif
553 if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
554 event_warn("%s: calloc", __func__);
555 return NULL;
557 detect_monotonic();
558 gettime(base, &base->event_tv);
560 min_heap_ctor(&base->timeheap);
561 TAILQ_INIT(&base->eventqueue);
562 base->sig.ev_signal_pair[0] = -1;
563 base->sig.ev_signal_pair[1] = -1;
564 base->th_notify_fd[0] = -1;
565 base->th_notify_fd[1] = -1;
567 event_deferred_cb_queue_init(&base->defer_queue);
568 base->defer_queue.notify_fn = notify_base_cbq_callback;
569 base->defer_queue.notify_arg = base;
570 if (cfg)
571 base->flags = cfg->flags;
573 evmap_io_initmap(&base->io);
574 evmap_signal_initmap(&base->sigmap);
575 event_changelist_init(&base->changelist);
577 base->evbase = NULL;
579 should_check_environment =
580 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
582 for (i = 0; eventops[i] && !base->evbase; i++) {
583 if (cfg != NULL) {
584 /* determine if this backend should be avoided */
585 if (event_config_is_avoided_method(cfg,
586 eventops[i]->name))
587 continue;
588 if ((eventops[i]->features & cfg->require_features)
589 != cfg->require_features)
590 continue;
593 /* also obey the environment variables */
594 if (should_check_environment &&
595 event_is_method_disabled(eventops[i]->name))
596 continue;
598 base->evsel = eventops[i];
600 base->evbase = base->evsel->init(base);
603 if (base->evbase == NULL) {
604 event_warnx("%s: no event mechanism available",
605 __func__);
606 event_base_free(base);
607 return NULL;
610 if (evutil_getenv("EVENT_SHOW_METHOD"))
611 event_msgx("libevent using: %s", base->evsel->name);
613 /* allocate a single active event queue */
614 if (event_base_priority_init(base, 1) < 0) {
615 event_base_free(base);
616 return NULL;
619 /* prepare for threading */
621 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
622 if (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK)) {
623 int r;
624 EVTHREAD_ALLOC_LOCK(base->th_base_lock,
625 EVTHREAD_LOCKTYPE_RECURSIVE);
626 base->defer_queue.lock = base->th_base_lock;
627 EVTHREAD_ALLOC_COND(base->current_event_cond);
628 r = evthread_make_base_notifiable(base);
629 if (r<0) {
630 event_base_free(base);
631 return NULL;
634 #endif
636 #ifdef WIN32
637 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
638 event_base_start_iocp(base, cfg->n_cpus_hint);
639 #endif
641 return (base);
645 event_base_start_iocp(struct event_base *base, int n_cpus)
647 #ifdef WIN32
648 if (base->iocp)
649 return 0;
650 base->iocp = event_iocp_port_launch(n_cpus);
651 if (!base->iocp) {
652 event_warnx("%s: Couldn't launch IOCP", __func__);
653 return -1;
655 return 0;
656 #else
657 return -1;
658 #endif
661 void
662 event_base_stop_iocp(struct event_base *base)
664 #ifdef WIN32
665 int rv;
667 if (!base->iocp)
668 return;
669 rv = event_iocp_shutdown(base->iocp, -1);
670 EVUTIL_ASSERT(rv >= 0);
671 base->iocp = NULL;
672 #endif
675 void
676 event_base_free(struct event_base *base)
678 int i, n_deleted=0;
679 struct event *ev;
680 /* XXXX grab the lock? If there is contention when one thread frees
681 * the base, then the contending thread will be very sad soon. */
683 if (base == NULL && current_base)
684 base = current_base;
685 if (base == current_base)
686 current_base = NULL;
688 /* XXX(niels) - check for internal events first */
689 EVUTIL_ASSERT(base);
691 #ifdef WIN32
692 event_base_stop_iocp(base);
693 #endif
695 /* threading fds if we have them */
696 if (base->th_notify_fd[0] != -1) {
697 event_del(&base->th_notify);
698 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
699 if (base->th_notify_fd[1] != -1)
700 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
701 base->th_notify_fd[0] = -1;
702 base->th_notify_fd[1] = -1;
703 event_debug_unassign(&base->th_notify);
706 /* Delete all non-internal events. */
707 for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
708 struct event *next = TAILQ_NEXT(ev, ev_next);
709 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
710 event_del(ev);
711 ++n_deleted;
713 ev = next;
715 while ((ev = min_heap_top(&base->timeheap)) != NULL) {
716 event_del(ev);
717 ++n_deleted;
719 for (i = 0; i < base->n_common_timeouts; ++i) {
720 struct common_timeout_list *ctl =
721 base->common_timeout_queues[i];
722 event_del(&ctl->timeout_event); /* Internal; doesn't count */
723 event_debug_unassign(&ctl->timeout_event);
724 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
725 struct event *next = TAILQ_NEXT(ev,
726 ev_timeout_pos.ev_next_with_common_timeout);
727 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
728 event_del(ev);
729 ++n_deleted;
731 ev = next;
733 mm_free(ctl);
735 if (base->common_timeout_queues)
736 mm_free(base->common_timeout_queues);
738 for (i = 0; i < base->nactivequeues; ++i) {
739 for (ev = TAILQ_FIRST(&base->activequeues[i]); ev; ) {
740 struct event *next = TAILQ_NEXT(ev, ev_active_next);
741 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
742 event_del(ev);
743 ++n_deleted;
745 ev = next;
749 if (n_deleted)
750 event_debug(("%s: %d events were still set in base",
751 __func__, n_deleted));
753 if (base->evsel != NULL && base->evsel->dealloc != NULL)
754 base->evsel->dealloc(base);
756 for (i = 0; i < base->nactivequeues; ++i)
757 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
759 EVUTIL_ASSERT(min_heap_empty(&base->timeheap));
760 min_heap_dtor(&base->timeheap);
762 mm_free(base->activequeues);
764 EVUTIL_ASSERT(TAILQ_EMPTY(&base->eventqueue));
766 evmap_io_clear(&base->io);
767 evmap_signal_clear(&base->sigmap);
768 event_changelist_freemem(&base->changelist);
770 EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
771 EVTHREAD_FREE_COND(base->current_event_cond);
773 mm_free(base);
776 /* reinitialize the event base after a fork */
778 event_reinit(struct event_base *base)
780 const struct eventop *evsel;
781 int res = 0;
782 struct event *ev;
783 int was_notifiable = 0;
785 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
787 evsel = base->evsel;
789 #if 0
790 /* Right now, reinit always takes effect, since even if the
791 backend doesn't require it, the signal socketpair code does.
795 /* check if this event mechanism requires reinit */
796 if (!evsel->need_reinit)
797 goto done;
798 #endif
800 /* prevent internal delete */
801 if (base->sig.ev_signal_added) {
802 /* we cannot call event_del here because the base has
803 * not been reinitialized yet. */
804 event_queue_remove(base, &base->sig.ev_signal,
805 EVLIST_INSERTED);
806 if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
807 event_queue_remove(base, &base->sig.ev_signal,
808 EVLIST_ACTIVE);
809 base->sig.ev_signal_added = 0;
811 if (base->th_notify_fd[0] != -1) {
812 /* we cannot call event_del here because the base has
813 * not been reinitialized yet. */
814 was_notifiable = 1;
815 event_queue_remove(base, &base->th_notify,
816 EVLIST_INSERTED);
817 if (base->th_notify.ev_flags & EVLIST_ACTIVE)
818 event_queue_remove(base, &base->th_notify,
819 EVLIST_ACTIVE);
820 base->sig.ev_signal_added = 0;
821 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
822 if (base->th_notify_fd[1] != -1)
823 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
824 base->th_notify_fd[0] = -1;
825 base->th_notify_fd[1] = -1;
826 event_debug_unassign(&base->th_notify);
829 if (base->evsel->dealloc != NULL)
830 base->evsel->dealloc(base);
831 base->evbase = evsel->init(base);
832 if (base->evbase == NULL) {
833 event_errx(1, "%s: could not reinitialize event mechanism",
834 __func__);
835 res = -1;
836 goto done;
839 event_changelist_freemem(&base->changelist); /* XXX */
840 evmap_io_clear(&base->io);
841 evmap_signal_clear(&base->sigmap);
843 TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
844 if (ev->ev_events & (EV_READ|EV_WRITE)) {
845 if (evmap_io_add(base, ev->ev_fd, ev) == -1)
846 res = -1;
847 } else if (ev->ev_events & EV_SIGNAL) {
848 if (evmap_signal_add(base, (int)ev->ev_fd, ev) == -1)
849 res = -1;
853 if (was_notifiable && res == 0)
854 res = evthread_make_base_notifiable(base);
856 done:
857 EVBASE_RELEASE_LOCK(base, th_base_lock);
858 return (res);
861 const char **
862 event_get_supported_methods(void)
864 static const char **methods = NULL;
865 const struct eventop **method;
866 const char **tmp;
867 int i = 0, k;
869 /* count all methods */
870 for (method = &eventops[0]; *method != NULL; ++method) {
871 ++i;
874 /* allocate one more than we need for the NULL pointer */
875 tmp = mm_calloc((i + 1), sizeof(char *));
876 if (tmp == NULL)
877 return (NULL);
879 /* populate the array with the supported methods */
880 for (k = 0, i = 0; eventops[k] != NULL; ++k) {
881 tmp[i++] = eventops[k]->name;
883 tmp[i] = NULL;
885 if (methods != NULL)
886 mm_free((char**)methods);
888 methods = tmp;
890 return (methods);
893 struct event_config *
894 event_config_new(void)
896 struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
898 if (cfg == NULL)
899 return (NULL);
901 TAILQ_INIT(&cfg->entries);
903 return (cfg);
906 static void
907 event_config_entry_free(struct event_config_entry *entry)
909 if (entry->avoid_method != NULL)
910 mm_free((char *)entry->avoid_method);
911 mm_free(entry);
914 void
915 event_config_free(struct event_config *cfg)
917 struct event_config_entry *entry;
919 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
920 TAILQ_REMOVE(&cfg->entries, entry, next);
921 event_config_entry_free(entry);
923 mm_free(cfg);
927 event_config_set_flag(struct event_config *cfg, int flag)
929 if (!cfg)
930 return -1;
931 cfg->flags |= flag;
932 return 0;
936 event_config_avoid_method(struct event_config *cfg, const char *method)
938 struct event_config_entry *entry = mm_malloc(sizeof(*entry));
939 if (entry == NULL)
940 return (-1);
942 if ((entry->avoid_method = mm_strdup(method)) == NULL) {
943 mm_free(entry);
944 return (-1);
947 TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
949 return (0);
953 event_config_require_features(struct event_config *cfg,
954 int features)
956 if (!cfg)
957 return (-1);
958 cfg->require_features = features;
959 return (0);
963 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
965 if (!cfg)
966 return (-1);
967 cfg->n_cpus_hint = cpus;
968 return (0);
972 event_priority_init(int npriorities)
974 return event_base_priority_init(current_base, npriorities);
978 event_base_priority_init(struct event_base *base, int npriorities)
980 int i;
982 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
983 || npriorities >= EVENT_MAX_PRIORITIES)
984 return (-1);
986 if (npriorities == base->nactivequeues)
987 return (0);
989 if (base->nactivequeues) {
990 mm_free(base->activequeues);
991 base->nactivequeues = 0;
994 /* Allocate our priority queues */
995 base->activequeues = (struct event_list *)
996 mm_calloc(npriorities, sizeof(struct event_list));
997 if (base->activequeues == NULL) {
998 event_warn("%s: calloc", __func__);
999 return (-1);
1001 base->nactivequeues = npriorities;
1003 for (i = 0; i < base->nactivequeues; ++i) {
1004 TAILQ_INIT(&base->activequeues[i]);
1007 return (0);
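/* [Editor's note] A minimal sketch (names and fds are placeholders, not
 * from this file) of the priority machinery set up above: queue 0 is
 * dispatched before queue 1 and can starve it, as noted in the comment
 * above event_process_active() below. */
#if 0
#include <event2/event.h>

static void
dummy_cb(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;
}

static void
setup_priorities(struct event_base *base, evutil_socket_t urgent_fd,
    evutil_socket_t bulk_fd)
{
	struct event *urgent, *bulk;

	event_base_priority_init(base, 2);      /* priorities 0 and 1 */
	urgent = event_new(base, urgent_fd, EV_READ|EV_PERSIST, dummy_cb, NULL);
	bulk = event_new(base, bulk_fd, EV_READ|EV_PERSIST, dummy_cb, NULL);
	event_priority_set(urgent, 0);          /* runs first */
	event_priority_set(bulk, 1);
	event_add(urgent, NULL);
	event_add(bulk, NULL);
}
#endif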
1010 /* Returns true iff we're currently watching any events. */
1011 static int
1012 event_haveevents(struct event_base *base)
1014 /* Caller must hold th_base_lock */
1015 return (base->virtual_event_count > 0 || base->event_count > 0);
1018 /* "closure" function called when processing active signal events */
1019 static inline void
1020 event_signal_closure(struct event_base *base, struct event *ev)
1022 short ncalls;
1024 /* Allows deletes to work */
1025 ncalls = ev->ev_ncalls;
1026 ev->ev_pncalls = &ncalls;
1027 EVBASE_RELEASE_LOCK(base, th_base_lock);
1028 while (ncalls) {
1029 ncalls--;
1030 ev->ev_ncalls = ncalls;
1031 if (ncalls == 0)
1032 ev->ev_pncalls = NULL;
1033 (*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1034 #if 0
1035 /* XXXX we can't do this without a lock on the base. */
1036 if (base->event_break)
1037 return;
1038 #endif
1042 /* Common timeouts are special timeouts that are handled as queues rather than
1043 * in the minheap. This is more efficient than the minheap if we happen to
1044 * know that we're going to get several thousand timeout events all with
1045 * the same timeout value.
1047 * Since all our timeout handling code assumes timevals can be copied,
1048 * assigned, etc, we can't use "magic pointer" to encode these common
1049 * timeouts. Searching through a list to see if every timeout is common could
1050 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1051 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1052 * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
1053 * of index into the event_base's array of common timeouts.
1056 #define MICROSECONDS_MASK 0x000fffff
1057 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1058 #define COMMON_TIMEOUT_IDX_SHIFT 20
1059 #define COMMON_TIMEOUT_MASK 0xf0000000
1060 #define COMMON_TIMEOUT_MAGIC 0x50000000
1062 #define COMMON_TIMEOUT_IDX(tv) \
1063 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
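/* [Editor's note] A worked example of the encoding above, added as a sketch
 * rather than part of this file: the timeval returned by
 * event_base_init_common_timeout() keeps the real microseconds in the low
 * 20 bits of tv_usec, the queue index in bits 20..27, and the 0x5 magic
 * nibble in the top 4 bits (e.g. the first common timeout created on a
 * base has tv_usec == 0x50000000 | (0 << 20) | usec). */
#if 0
#include <event2/event.h>

static void
add_with_common_timeout(struct event_base *base, struct event *ev)
{
	struct timeval five_sec = { 5, 0 };
	const struct timeval *common;

	common = event_base_init_common_timeout(base, &five_sec);
	if (common)
		event_add(ev, common);    /* queued in a common_timeout_list */
	else
		event_add(ev, &five_sec); /* fall back to the minheap */
}
#endif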
1065 /** Return true iff 'tv' is a common timeout in 'base' */
1066 static inline int
1067 is_common_timeout(const struct timeval *tv,
1068 const struct event_base *base)
1070 int idx;
1071 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1072 return 0;
1073 idx = COMMON_TIMEOUT_IDX(tv);
1074 return idx < base->n_common_timeouts;
1077 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1078 * one is a common timeout. */
1079 static inline int
1080 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1082 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1083 (tv2->tv_usec & ~MICROSECONDS_MASK);
1086 /** Requires that 'tv' is a common timeout. Return the corresponding
1087 * common_timeout_list. */
1088 static inline struct common_timeout_list *
1089 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1091 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1094 #if 0
1095 static inline int
1096 common_timeout_ok(const struct timeval *tv,
1097 struct event_base *base)
1099 const struct timeval *expect =
1100 &get_common_timeout_list(base, tv)->duration;
1101 return tv->tv_sec == expect->tv_sec &&
1102 tv->tv_usec == expect->tv_usec;
1104 #endif
1106 /* Add the timeout for the first event in given common timeout list to the
1107 * event_base's minheap. */
1108 static void
1109 common_timeout_schedule(struct common_timeout_list *ctl,
1110 const struct timeval *now, struct event *head)
1112 struct timeval timeout = head->ev_timeout;
1113 timeout.tv_usec &= MICROSECONDS_MASK;
1114 event_add_internal(&ctl->timeout_event, &timeout, 1);
1117 /* Callback: invoked when the timeout for a common timeout queue triggers.
1118 * This means that (at least) the first event in that queue should be run,
1119 * and the timeout should be rescheduled if there are more events. */
1120 static void
1121 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1123 struct timeval now;
1124 struct common_timeout_list *ctl = arg;
1125 struct event_base *base = ctl->base;
1126 struct event *ev = NULL;
1127 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1128 gettime(base, &now);
1129 while (1) {
1130 ev = TAILQ_FIRST(&ctl->events);
1131 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1132 (ev->ev_timeout.tv_sec == now.tv_sec &&
1133 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1134 break;
1135 event_del_internal(ev);
1136 event_active_nolock(ev, EV_TIMEOUT, 1);
1138 if (ev)
1139 common_timeout_schedule(ctl, &now, ev);
1140 EVBASE_RELEASE_LOCK(base, th_base_lock);
1143 #define MAX_COMMON_TIMEOUTS 256
1145 const struct timeval *
1146 event_base_init_common_timeout(struct event_base *base,
1147 const struct timeval *duration)
1149 int i;
1150 struct timeval tv;
1151 const struct timeval *result=NULL;
1152 struct common_timeout_list *new_ctl;
1154 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1155 if (duration->tv_usec > 1000000) {
1156 memcpy(&tv, duration, sizeof(struct timeval));
1157 if (is_common_timeout(duration, base))
1158 tv.tv_usec &= MICROSECONDS_MASK;
1159 tv.tv_sec += tv.tv_usec / 1000000;
1160 tv.tv_usec %= 1000000;
1161 duration = &tv;
1163 for (i = 0; i < base->n_common_timeouts; ++i) {
1164 const struct common_timeout_list *ctl =
1165 base->common_timeout_queues[i];
1166 if (duration->tv_sec == ctl->duration.tv_sec &&
1167 duration->tv_usec ==
1168 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1169 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1170 result = &ctl->duration;
1171 goto done;
1174 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1175 event_warnx("%s: Too many common timeouts already in use; "
1176 "we only support %d per event_base", __func__,
1177 MAX_COMMON_TIMEOUTS);
1178 goto done;
1180 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1181 int n = base->n_common_timeouts < 16 ? 16 :
1182 base->n_common_timeouts*2;
1183 struct common_timeout_list **newqueues =
1184 mm_realloc(base->common_timeout_queues,
1185 n*sizeof(struct common_timeout_queue *));
1186 if (!newqueues) {
1187 event_warn("%s: realloc",__func__);
1188 goto done;
1190 base->n_common_timeouts_allocated = n;
1191 base->common_timeout_queues = newqueues;
1193 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1194 if (!new_ctl) {
1195 event_warn("%s: calloc",__func__);
1196 goto done;
1198 TAILQ_INIT(&new_ctl->events);
1199 new_ctl->duration.tv_sec = duration->tv_sec;
1200 new_ctl->duration.tv_usec =
1201 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1202 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1203 evtimer_assign(&new_ctl->timeout_event, base,
1204 common_timeout_callback, new_ctl);
1205 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1206 event_priority_set(&new_ctl->timeout_event, 0);
1207 new_ctl->base = base;
1208 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1209 result = &new_ctl->duration;
1211 done:
1212 if (result)
1213 EVUTIL_ASSERT(is_common_timeout(result, base));
1215 EVBASE_RELEASE_LOCK(base, th_base_lock);
1216 return result;
1219 /* Closure function invoked when we're activating a persistent event. */
1220 static inline void
1221 event_persist_closure(struct event_base *base, struct event *ev)
1223 /* reschedule the persistent event if we have a timeout. */
1224 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1225 /* If there was a timeout, we want it to run at an interval of
1226 * ev_io_timeout after the last time it was _scheduled_ for,
1227 * not ev_io_timeout after _now_. If it fired for another
1228 * reason, though, the timeout ought to start ticking _now_. */
1229 struct timeval run_at;
1230 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1231 &ev->ev_io_timeout));
1232 if (is_common_timeout(&ev->ev_timeout, base)) {
1233 ev_uint32_t usec_mask;
1234 struct timeval delay, relative_to;
1235 delay = ev->ev_io_timeout;
1236 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1237 delay.tv_usec &= MICROSECONDS_MASK;
1238 if (ev->ev_res & EV_TIMEOUT) {
1239 relative_to = ev->ev_timeout;
1240 relative_to.tv_usec &= MICROSECONDS_MASK;
1241 } else {
1242 gettime(base, &relative_to);
1244 evutil_timeradd(&relative_to, &delay, &run_at);
1245 run_at.tv_usec |= usec_mask;
1246 } else {
1247 struct timeval relative_to;
1248 if (ev->ev_res & EV_TIMEOUT) {
1249 relative_to = ev->ev_timeout;
1250 } else {
1251 gettime(base, &relative_to);
1253 evutil_timeradd(&ev->ev_io_timeout, &relative_to,
1254 &run_at);
1256 event_add_internal(ev, &run_at, 1);
1258 EVBASE_RELEASE_LOCK(base, th_base_lock);
1259 (*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1263 Helper for event_process_active to process all the events in a single queue,
1264 releasing the lock as we go. This function requires that the lock be held
1265 when it's invoked. Returns -1 if we get a signal or an event_break that
1266 means we should stop processing any active events now. Otherwise returns
1267 the number of non-internal events that we processed.
1269 static int
1270 event_process_active_single_queue(struct event_base *base,
1271 struct event_list *activeq)
1273 struct event *ev;
1274 int count = 0;
1276 EVUTIL_ASSERT(activeq != NULL);
1278 for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
1279 if (ev->ev_events & EV_PERSIST)
1280 event_queue_remove(base, ev, EVLIST_ACTIVE);
1281 else
1282 event_del_internal(ev);
1283 if (!(ev->ev_flags & EVLIST_INTERNAL))
1284 ++count;
1286 event_debug((
1287 "event_process_active: event: %p, %s%scall %p",
1289 ev->ev_res & EV_READ ? "EV_READ " : " ",
1290 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1291 ev->ev_callback));
1293 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1294 base->current_event = ev;
1295 base->current_event_waiters = 0;
1296 #endif
1298 switch (ev->ev_closure) {
1299 case EV_CLOSURE_SIGNAL:
1300 event_signal_closure(base, ev);
1301 break;
1302 case EV_CLOSURE_PERSIST:
1303 event_persist_closure(base, ev);
1304 break;
1305 default:
1306 case EV_CLOSURE_NONE:
1307 EVBASE_RELEASE_LOCK(base, th_base_lock);
1308 (*ev->ev_callback)(
1309 (int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1310 break;
1313 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1314 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1315 base->current_event = NULL;
1316 if (base->current_event_waiters) {
1317 base->current_event_waiters = 0;
1318 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1320 #endif
1322 if (base->event_break)
1323 return -1;
1325 return count;
1329 Process up to MAX_DEFERRED of the deferred_cb entries in 'queue'. If
1330 *breakptr becomes set to 1, stop. Requires that we start out holding
1331 the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
1332 we process.
1334 static int
1335 event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
1337 int count = 0;
1338 struct deferred_cb *cb;
1340 #define MAX_DEFERRED 16
1341 while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
1342 cb->queued = 0;
1343 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
1344 --queue->active_count;
1345 UNLOCK_DEFERRED_QUEUE(queue);
1347 cb->cb(cb, cb->arg);
1349 LOCK_DEFERRED_QUEUE(queue);
1350 if (*breakptr)
1351 return -1;
1352 if (++count == MAX_DEFERRED)
1353 break;
1355 #undef MAX_DEFERRED
1356 return count;
1360 * Active events are stored in priority queues. Lower priorities are always
1361 * processed before higher priorities. Low priority events can starve high
1362 * priority ones.
1365 static int
1366 event_process_active(struct event_base *base)
1368 /* Caller must hold th_base_lock */
1369 struct event_list *activeq = NULL;
1370 int i, c = 0;
1372 for (i = 0; i < base->nactivequeues; ++i) {
1373 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1374 activeq = &base->activequeues[i];
1375 c = event_process_active_single_queue(base, activeq);
1376 if (c < 0)
1377 return -1;
1378 else if (c > 0)
1379 break; /* Processed a real event; do not
1380 * consider lower-priority events */
1381 /* If we get here, all of the events we processed
1382 * were internal. Continue. */
1386 event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
1387 return c;
1391 * Wait continuously for events. We exit only if no events are left.
1395 event_dispatch(void)
1397 return (event_loop(0));
1401 event_base_dispatch(struct event_base *event_base)
1403 return (event_base_loop(event_base, 0));
1406 const char *
1407 event_base_get_method(const struct event_base *base)
1409 EVUTIL_ASSERT(base);
1410 return (base->evsel->name);
1413 /** Callback: used to implement event_base_loopexit by telling the event_base
1414 * that it's time to exit its loop. */
1415 static void
1416 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1418 struct event_base *base = arg;
1419 base->event_gotterm = 1;
1423 event_loopexit(const struct timeval *tv)
1425 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1426 current_base, tv));
1430 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1432 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1433 event_base, tv));
1437 event_loopbreak(void)
1439 return (event_base_loopbreak(current_base));
1443 event_base_loopbreak(struct event_base *event_base)
1445 int r = 0;
1446 if (event_base == NULL)
1447 return (-1);
1449 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1450 event_base->event_break = 1;
1452 if (EVBASE_NEED_NOTIFY(event_base)) {
1453 r = evthread_notify_base(event_base);
1454 } else {
1455 r = (0);
1457 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1458 return r;
1462 event_base_got_break(struct event_base *event_base)
1464 int res;
1465 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1466 res = event_base->event_break;
1467 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1468 return res;
1472 event_base_got_exit(struct event_base *event_base)
1474 int res;
1475 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1476 res = event_base->event_gotterm;
1477 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1478 return res;
1481 /* not thread safe */
1484 event_loop(int flags)
1486 return event_base_loop(current_base, flags);
1490 event_base_loop(struct event_base *base, int flags)
1492 const struct eventop *evsel = base->evsel;
1493 struct timeval tv;
1494 struct timeval *tv_p;
1495 int res, done, retval = 0;
1497 /* Grab the lock. We will release it inside evsel.dispatch, and again
1498 * as we invoke user callbacks. */
1499 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1501 if (base->running_loop) {
1502 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1503 " can run on each event_base at once.", __func__);
1504 EVBASE_RELEASE_LOCK(base, th_base_lock);
1505 return -1;
1508 base->running_loop = 1;
1510 clear_time_cache(base);
1512 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1513 evsig_set_base(base);
1515 done = 0;
1517 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1518 base->th_owner_id = EVTHREAD_GET_ID();
1519 #endif
1521 base->event_gotterm = base->event_break = 0;
1523 while (!done) {
1524 /* Terminate the loop if we have been asked to */
1525 if (base->event_gotterm) {
1526 break;
1529 if (base->event_break) {
1530 break;
1533 timeout_correct(base, &tv);
1535 tv_p = &tv;
1536 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1537 timeout_next(base, &tv_p);
1538 } else {
1540 * if we have active events, we just poll new events
1541 * without waiting.
1543 evutil_timerclear(&tv);
1546 /* If we have no events, we just exit */
1547 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1548 event_debug(("%s: no events registered.", __func__));
1549 retval = 1;
1550 goto done;
1553 /* update last old time */
1554 gettime(base, &base->event_tv);
1556 clear_time_cache(base);
1558 res = evsel->dispatch(base, tv_p);
1560 if (res == -1) {
1561 event_debug(("%s: dispatch returned unsuccessfully.",
1562 __func__));
1563 retval = -1;
1564 goto done;
1567 update_time_cache(base);
1569 timeout_process(base);
1571 if (N_ACTIVE_CALLBACKS(base)) {
1572 int n = event_process_active(base);
1573 if ((flags & EVLOOP_ONCE)
1574 && N_ACTIVE_CALLBACKS(base) == 0
1575 && n != 0)
1576 done = 1;
1577 } else if (flags & EVLOOP_NONBLOCK)
1578 done = 1;
1580 event_debug(("%s: asked to terminate loop.", __func__));
1582 done:
1583 clear_time_cache(base);
1584 base->running_loop = 0;
1586 EVBASE_RELEASE_LOCK(base, th_base_lock);
1588 return (retval);
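/* [Editor's note] Illustrative driver for the loop above; the ten-second
 * figure is arbitrary. event_base_dispatch() is event_base_loop(base, 0),
 * and event_base_loopexit() arms an internal one-shot event through
 * event_base_once() (see event_loopexit_cb above). */
#if 0
#include <event2/event.h>

static int
run_for_ten_seconds(struct event_base *base)
{
	struct timeval ten = { 10, 0 };

	event_base_loopexit(base, &ten);  /* request exit in ~10s */
	return event_base_loop(base, 0);  /* 1 if no events were registered */
}
#endif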
1591 /* Sets up an event for processing once */
1592 struct event_once {
1593 struct event ev;
1595 void (*cb)(evutil_socket_t, short, void *);
1596 void *arg;
1599 /* One-time callback to implement event_base_once: invokes the user callback,
1600 * then deletes the allocated storage */
1601 static void
1602 event_once_cb(evutil_socket_t fd, short events, void *arg)
1604 struct event_once *eonce = arg;
1606 (*eonce->cb)(fd, events, eonce->arg);
1607 event_debug_unassign(&eonce->ev);
1608 mm_free(eonce);
1611 /* not thread safe; schedules an event once. */
1613 event_once(evutil_socket_t fd, short events,
1614 void (*callback)(evutil_socket_t, short, void *),
1615 void *arg, const struct timeval *tv)
1617 return event_base_once(current_base, fd, events, callback, arg, tv);
1620 /* Schedules an event once */
1622 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1623 void (*callback)(evutil_socket_t, short, void *),
1624 void *arg, const struct timeval *tv)
1626 struct event_once *eonce;
1627 struct timeval etv;
1628 int res = 0;
1630 /* We cannot support signals that just fire once, or persistent
1631 * events. */
1632 if (events & (EV_SIGNAL|EV_PERSIST))
1633 return (-1);
1635 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1636 return (-1);
1638 eonce->cb = callback;
1639 eonce->arg = arg;
1641 if (events == EV_TIMEOUT) {
1642 if (tv == NULL) {
1643 evutil_timerclear(&etv);
1644 tv = &etv;
1647 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1648 } else if (events & (EV_READ|EV_WRITE)) {
1649 events &= EV_READ|EV_WRITE;
1651 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1652 } else {
1653 /* Bad event combination */
1654 mm_free(eonce);
1655 return (-1);
1658 if (res == 0)
1659 res = event_add(&eonce->ev, tv);
1660 if (res != 0) {
1661 mm_free(eonce);
1662 return (res);
1665 return (0);
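/* [Editor's note] A hypothetical one-shot timer via event_base_once(); the
 * callback and message are placeholders. The struct event_once allocated
 * above is freed inside event_once_cb(), so the caller never manages it,
 * and EV_SIGNAL/EV_PERSIST are rejected as shown at the top of
 * event_base_once(). */
#if 0
#include <event2/event.h>
#include <stdio.h>

static void
hello_cb(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what;
	printf("hello, %s\n", (const char *)arg);
}

static int
say_hello_later(struct event_base *base)
{
	static char who[] = "world";
	struct timeval two = { 2, 0 };

	return event_base_once(base, -1, EV_TIMEOUT, hello_cb, who, &two);
}
#endif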
1669 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
1671 if (!base)
1672 base = current_base;
1674 _event_debug_assert_not_added(ev);
1676 ev->ev_base = base;
1678 ev->ev_callback = callback;
1679 ev->ev_arg = arg;
1680 ev->ev_fd = fd;
1681 ev->ev_events = events;
1682 ev->ev_res = 0;
1683 ev->ev_flags = EVLIST_INIT;
1684 ev->ev_ncalls = 0;
1685 ev->ev_pncalls = NULL;
1687 if (events & EV_SIGNAL) {
1688 if ((events & (EV_READ|EV_WRITE)) != 0) {
1689 event_warnx("%s: EV_SIGNAL is not compatible with "
1690 "EV_READ or EV_WRITE", __func__);
1691 return -1;
1693 ev->ev_closure = EV_CLOSURE_SIGNAL;
1694 } else {
1695 if (events & EV_PERSIST) {
1696 evutil_timerclear(&ev->ev_io_timeout);
1697 ev->ev_closure = EV_CLOSURE_PERSIST;
1698 } else {
1699 ev->ev_closure = EV_CLOSURE_NONE;
1703 min_heap_elem_init(ev);
1705 if (base != NULL) {
1706 /* by default, we put new events into the middle priority */
1707 ev->ev_pri = base->nactivequeues / 2;
1710 _event_debug_note_setup(ev);
1712 return 0;
1716 event_base_set(struct event_base *base, struct event *ev)
1718 /* Only innocent events may be assigned to a different base */
1719 if (ev->ev_flags != EVLIST_INIT)
1720 return (-1);
1722 _event_debug_assert_is_setup(ev);
1724 ev->ev_base = base;
1725 ev->ev_pri = base->nactivequeues/2;
1727 return (0);
1730 void
1731 event_set(struct event *ev, evutil_socket_t fd, short events,
1732 void (*callback)(evutil_socket_t, short, void *), void *arg)
1734 int r;
1735 r = event_assign(ev, current_base, fd, events, callback, arg);
1736 EVUTIL_ASSERT(r == 0);
1739 struct event *
1740 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
1742 struct event *ev;
1743 ev = mm_malloc(sizeof(struct event));
1744 if (ev == NULL)
1745 return (NULL);
1746 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
1747 mm_free(ev);
1748 return (NULL);
1751 return (ev);
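/* [Editor's note] A minimal lifecycle sketch for a heap-allocated event
 * (names are placeholders): event_new() -> event_add() -> dispatch ->
 * event_free(). With EV_PERSIST the event keeps the EV_CLOSURE_PERSIST
 * closure assigned in event_assign() above and stays added after each
 * callback. */
#if 0
#include <event2/event.h>

static void
on_readable(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;
	/* read from fd here */
}

static int
watch_fd(struct event_base *base, evutil_socket_t fd)
{
	struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
	    on_readable, NULL);

	if (!ev)
		return -1;
	if (event_add(ev, NULL) < 0) {  /* no timeout */
		event_free(ev);
		return -1;
	}
	/* Later, event_free(ev) deletes and frees it (see event_free()). */
	return 0;
}
#endif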
1754 void
1755 event_free(struct event *ev)
1757 _event_debug_assert_is_setup(ev);
1759 /* make sure that this event won't be coming back to haunt us. */
1760 event_del(ev);
1761 _event_debug_note_teardown(ev);
1762 mm_free(ev);
1766 void
1767 event_debug_unassign(struct event *ev)
1769 _event_debug_assert_not_added(ev);
1770 _event_debug_note_teardown(ev);
1772 ev->ev_flags &= ~EVLIST_INIT;
1776 * Sets the priority of an event - if an event is already scheduled
1777 * changing the priority is going to fail.
1781 event_priority_set(struct event *ev, int pri)
1783 _event_debug_assert_is_setup(ev);
1785 if (ev->ev_flags & EVLIST_ACTIVE)
1786 return (-1);
1787 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
1788 return (-1);
1790 ev->ev_pri = pri;
1792 return (0);
1796 * Checks if a specific event is pending or scheduled.
1800 event_pending(const struct event *ev, short event, struct timeval *tv)
1802 int flags = 0;
1804 _event_debug_assert_is_setup(ev);
1806 if (ev->ev_flags & EVLIST_INSERTED)
1807 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
1808 if (ev->ev_flags & EVLIST_ACTIVE)
1809 flags |= ev->ev_res;
1810 if (ev->ev_flags & EVLIST_TIMEOUT)
1811 flags |= EV_TIMEOUT;
1813 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
1815 /* See if there is a timeout that we should report */
1816 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
1817 struct timeval tmp = ev->ev_timeout;
1818 tmp.tv_usec &= MICROSECONDS_MASK;
1819 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1820 /* correctly remap to real time */
1821 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
1822 #else
1823 *tv = tmp;
1824 #endif
1827 return (flags & event);
1831 event_initialized(const struct event *ev)
1833 if (!(ev->ev_flags & EVLIST_INIT))
1834 return 0;
1836 return 1;
1839 void
1840 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
1842 _event_debug_assert_is_setup(event);
1844 if (base_out)
1845 *base_out = event->ev_base;
1846 if (fd_out)
1847 *fd_out = event->ev_fd;
1848 if (events_out)
1849 *events_out = event->ev_events;
1850 if (callback_out)
1851 *callback_out = event->ev_callback;
1852 if (arg_out)
1853 *arg_out = event->ev_arg;
1856 size_t
1857 event_get_struct_event_size(void)
1859 return sizeof(struct event);
1862 evutil_socket_t
1863 event_get_fd(const struct event *ev)
1865 _event_debug_assert_is_setup(ev);
1866 return ev->ev_fd;
1869 struct event_base *
1870 event_get_base(const struct event *ev)
1872 _event_debug_assert_is_setup(ev);
1873 return ev->ev_base;
1876 short
1877 event_get_events(const struct event *ev)
1879 _event_debug_assert_is_setup(ev);
1880 return ev->ev_events;
1883 event_callback_fn
1884 event_get_callback(const struct event *ev)
1886 _event_debug_assert_is_setup(ev);
1887 return ev->ev_callback;
1890 void *
1891 event_get_callback_arg(const struct event *ev)
1893 _event_debug_assert_is_setup(ev);
1894 return ev->ev_arg;
1898 event_add(struct event *ev, const struct timeval *tv)
1900 int res;
1902 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
1903 event_warnx("%s: event has no event_base set.", __func__);
1904 return -1;
1907 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
1909 res = event_add_internal(ev, tv, 0);
1911 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
1913 return (res);
1916 /* Helper callback: wake an event_base from another thread. This version
1917 * works by writing a byte to one end of a socketpair, so that the event_base
1918 * listening on the other end will wake up as the corresponding event
1919 * triggers */
1920 static int
1921 evthread_notify_base_default(struct event_base *base)
1923 char buf[1];
1924 int r;
1925 buf[0] = (char) 0;
1926 #ifdef WIN32
1927 r = send(base->th_notify_fd[1], buf, 1, 0);
1928 #else
1929 r = write(base->th_notify_fd[1], buf, 1);
1930 #endif
1931 return (r < 0 && errno != EAGAIN) ? -1 : 0;
1934 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
1935 /* Helper callback: wake an event_base from another thread. This version
1936 * assumes that you have a working eventfd() implementation. */
1937 static int
1938 evthread_notify_base_eventfd(struct event_base *base)
1940 ev_uint64_t msg = 1;
1941 int r;
1942 do {
1943 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
1944 } while (r < 0 && errno == EAGAIN);
1946 return (r < 0) ? -1 : 0;
1948 #endif
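/* [Editor's note] The notify helpers above are internal. A hedged sketch of
 * the public pattern that ends up calling evthread_notify_base(): enable
 * locking (here via evthread_use_pthreads(), which requires linking
 * libevent_pthreads) and then activate an event from another thread. The
 * worker function and its argument are illustrative. */
#if 0
#include <event2/thread.h>
#include <event2/event.h>
#include <pthread.h>

static void *
worker(void *arg)
{
	struct event *ev = arg;

	/* ... produce a result for the main loop ... */
	event_active(ev, EV_READ, 1);   /* wakes the dispatching thread */
	return NULL;
}

/* Somewhere before event_base_new(): evthread_use_pthreads(); */
#endif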
1950 /** Tell the thread currently running the event_loop for base (if any) that it
1951 * needs to stop waiting in its dispatch function (if it is) and process all
1952 * active events and deferred callbacks (if there are any). */
1953 static int
1954 evthread_notify_base(struct event_base *base)
1956 EVENT_BASE_ASSERT_LOCKED(base);
1957 if (!base->th_notify_fn)
1958 return -1;
1959 if (base->is_notify_pending)
1960 return 0;
1961 base->is_notify_pending = 1;
1962 return base->th_notify_fn(base);
1965 /* Implementation function to add an event. Works just like event_add,
1966 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
1967 * we treat tv as an absolute time, not as an interval to add to the current
1968 * time */
1969 static inline int
1970 event_add_internal(struct event *ev, const struct timeval *tv,
1971 int tv_is_absolute)
1973 struct event_base *base = ev->ev_base;
1974 int res = 0;
1975 int notify = 0;
1977 EVENT_BASE_ASSERT_LOCKED(base);
1978 _event_debug_assert_is_setup(ev);
1980 event_debug((
1981 "event_add: event: %p (fd %d), %s%s%scall %p",
1983 (int)ev->ev_fd,
1984 ev->ev_events & EV_READ ? "EV_READ " : " ",
1985 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
1986 tv ? "EV_TIMEOUT " : " ",
1987 ev->ev_callback));
1989 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
1992 * prepare for timeout insertion further below, if we get a
1993 * failure on any step, we should not change any state.
1995 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
1996 if (min_heap_reserve(&base->timeheap,
1997 1 + min_heap_size(&base->timeheap)) == -1)
1998 return (-1); /* ENOMEM == errno */
2001 /* If the main thread is currently executing a signal event's
2002 * callback, and we are not the main thread, then we want to wait
2003 * until the callback is done before we mess with the event, or else
2004 * we can race on ev_ncalls and ev_pncalls below. */
2005 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2006 if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
2007 && !EVBASE_IN_THREAD(base)) {
2008 ++base->current_event_waiters;
2009 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2011 #endif
2013 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
2014 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
2015 if (ev->ev_events & (EV_READ|EV_WRITE))
2016 res = evmap_io_add(base, ev->ev_fd, ev);
2017 else if (ev->ev_events & EV_SIGNAL)
2018 res = evmap_signal_add(base, (int)ev->ev_fd, ev);
2019 if (res != -1)
2020 event_queue_insert(base, ev, EVLIST_INSERTED);
2021 if (res == 1) {
2022 /* evmap says we need to notify the main thread. */
2023 notify = 1;
2024 res = 0;
2029 * we should change the timeout state only if the previous event
2030 * addition succeeded.
2032 if (res != -1 && tv != NULL) {
2033 struct timeval now;
2034 int common_timeout;
2037 * for persistent timeout events, we remember the
2038 * timeout value and re-add the event.
2040 * If tv_is_absolute, this was already set.
2042 if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
2043 ev->ev_io_timeout = *tv;
2046 * we already reserved memory above for the case where we
2047 * are not replacing an existing timeout.
2049 if (ev->ev_flags & EVLIST_TIMEOUT) {
2050 /* XXX I believe this is needless. */
2051 if (min_heap_elt_is_top(ev))
2052 notify = 1;
2053 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2056 /* Check if it is active due to a timeout. Rescheduling
2057 * this timeout before the callback can be executed
2058 * removes it from the active list. */
2059 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2060 (ev->ev_res & EV_TIMEOUT)) {
2061 if (ev->ev_events & EV_SIGNAL) {
2062 /* See if we are just active executing
2063 * this event in a loop
2065 if (ev->ev_ncalls && ev->ev_pncalls) {
2066 /* Abort loop */
2067 *ev->ev_pncalls = 0;
2071 event_queue_remove(base, ev, EVLIST_ACTIVE);
2074 gettime(base, &now);
2076 common_timeout = is_common_timeout(tv, base);
2077 if (tv_is_absolute) {
2078 ev->ev_timeout = *tv;
2079 } else if (common_timeout) {
2080 struct timeval tmp = *tv;
2081 tmp.tv_usec &= MICROSECONDS_MASK;
2082 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2083 ev->ev_timeout.tv_usec |=
2084 (tv->tv_usec & ~MICROSECONDS_MASK);
2085 } else {
2086 evutil_timeradd(&now, tv, &ev->ev_timeout);
2089 event_debug((
2090 "event_add: timeout in %d seconds, call %p",
2091 (int)tv->tv_sec, ev->ev_callback));
2093 event_queue_insert(base, ev, EVLIST_TIMEOUT);
2094 if (common_timeout) {
2095 struct common_timeout_list *ctl =
2096 get_common_timeout_list(base, &ev->ev_timeout);
2097 if (ev == TAILQ_FIRST(&ctl->events)) {
2098 common_timeout_schedule(ctl, &now, ev);
2100 } else {
2101 /* See if the earliest timeout is now earlier than it
2102 * was before: if so, we will need to tell the main
2103 * thread to wake up earlier than it would
2104 * otherwise. */
2105 if (min_heap_elt_is_top(ev))
2106 notify = 1;
2110 /* if we are not in the right thread, we need to wake up the loop */
2111 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2112 evthread_notify_base(base);
2114 _event_debug_note_add(ev);
2116 return (res);
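/*
 * Usage sketch (illustrative; not part of this file): scheduling a
 * persistent one-second timer through the public libevent 2.0 API.
 * Because the event is EV_PERSIST with no fd, event_add() records the
 * interval in ev_io_timeout (see above) and the timer re-arms itself
 * after every callback.  The name on_tick is a made-up example, and
 * error checking is omitted.
 *
 *     #include <event2/event.h>
 *     #include <stdio.h>
 *
 *     static void
 *     on_tick(evutil_socket_t fd, short what, void *arg)
 *     {
 *             printf("tick\n");
 *     }
 *
 *     int
 *     main(void)
 *     {
 *             struct event_base *base = event_base_new();
 *             struct event *timer = event_new(base, -1, EV_PERSIST, on_tick, NULL);
 *             struct timeval one_sec = { 1, 0 };
 *
 *             event_add(timer, &one_sec);        // queued on EVLIST_TIMEOUT
 *             event_base_dispatch(base);         // on_tick() fires every second
 *
 *             event_free(timer);
 *             event_base_free(base);
 *             return 0;
 *     }
 */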
2120 event_del(struct event *ev)
2122 int res;
2124 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2125 event_warnx("%s: event has no event_base set.", __func__);
2126 return -1;
2129 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2131 res = event_del_internal(ev);
2133 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2135 return (res);
2138 /* Helper for event_del: always called with th_base_lock held. */
2139 static inline int
2140 event_del_internal(struct event *ev)
2142 struct event_base *base;
2143 int res = 0, notify = 0;
2145 event_debug(("event_del: %p (fd %d), callback %p",
2146 ev, (int)ev->ev_fd, ev->ev_callback));
2148 /* An event without a base has not been added */
2149 if (ev->ev_base == NULL)
2150 return (-1);
2152 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2154 /* If the main thread is currently executing this event's callback,
2155 * and we are not the main thread, then we want to wait until the
2156 * callback is done before we start removing the event. That way,
2157 * when this function returns, it will be safe to free the
2158 * user-supplied argument. */
2159 base = ev->ev_base;
2160 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2161 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2162 ++base->current_event_waiters;
2163 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2165 #endif
2167 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2169 /* See if we are just active executing this event in a loop */
2170 if (ev->ev_events & EV_SIGNAL) {
2171 if (ev->ev_ncalls && ev->ev_pncalls) {
2172 /* Abort loop */
2173 *ev->ev_pncalls = 0;
2177 if (ev->ev_flags & EVLIST_TIMEOUT) {
2178 /* NOTE: We never need to notify the main thread because of a
2179 * deleted timeout event: all that could happen if we don't is
2180 * that the dispatch loop might wake up too early. But the
2181 * point of notifying the main thread _is_ to wake up the
2182 * dispatch loop early anyway, so we wouldn't gain anything by
2183 * doing it.
2184 */
2185 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2188 if (ev->ev_flags & EVLIST_ACTIVE)
2189 event_queue_remove(base, ev, EVLIST_ACTIVE);
2191 if (ev->ev_flags & EVLIST_INSERTED) {
2192 event_queue_remove(base, ev, EVLIST_INSERTED);
2193 if (ev->ev_events & (EV_READ|EV_WRITE))
2194 res = evmap_io_del(base, ev->ev_fd, ev);
2195 else
2196 res = evmap_signal_del(base, (int)ev->ev_fd, ev);
2197 if (res == 1) {
2198 /* evmap says we need to notify the main thread. */
2199 notify = 1;
2200 res = 0;
2204 /* if we are not in the right thread, we need to wake up the loop */
2205 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2206 evthread_notify_base(base);
2208 _event_debug_note_del(ev);
2210 return (res);
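/*
 * Sketch (illustrative; not part of this file): with threading support
 * enabled, event_del() called from a thread other than the one running
 * the callback waits for that callback to finish (see the comment
 * above), so the callback argument can be freed as soon as event_del()
 * returns.  struct conn, its read_ev member and conn_free() are
 * hypothetical application names; assume c was passed as read_ev's
 * callback argument.
 *
 *     event_del(c->read_ev);      // waits while c's callback is running
 *     event_free(c->read_ev);
 *     conn_free(c);               // safe: no callback can still touch c
 */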
2213 void
2214 event_active(struct event *ev, int res, short ncalls)
2216 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2217 event_warnx("%s: event has no event_base set.", __func__);
2218 return;
2221 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2223 _event_debug_assert_is_setup(ev);
2225 event_active_nolock(ev, res, ncalls);
2227 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2231 void
2232 event_active_nolock(struct event *ev, int res, short ncalls)
2234 struct event_base *base;
2236 event_debug(("event_active: %p (fd %d), res %d, callback %p",
2237 ev, (int)ev->ev_fd, (int)res, ev->ev_callback));
2240 /* We get different kinds of events, add them together */
2241 if (ev->ev_flags & EVLIST_ACTIVE) {
2242 ev->ev_res |= res;
2243 return;
2246 base = ev->ev_base;
2248 EVENT_BASE_ASSERT_LOCKED(base);
2250 ev->ev_res = res;
2252 if (ev->ev_events & EV_SIGNAL) {
2253 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2254 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2255 ++base->current_event_waiters;
2256 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2258 #endif
2259 ev->ev_ncalls = ncalls;
2260 ev->ev_pncalls = NULL;
2263 event_queue_insert(base, ev, EVLIST_ACTIVE);
2265 if (EVBASE_NEED_NOTIFY(base))
2266 evthread_notify_base(base);
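/*
 * Usage sketch (illustrative; not part of this file): event_active()
 * runs an event's callback without waiting for its fd or timeout,
 * which is handy for handing results back to the loop from a worker
 * thread once the base is notifiable.  wakeup_ev is a hypothetical
 * event created with event_new().
 *
 *     // in a worker thread:
 *     event_active(wakeup_ev, EV_READ, 1);
 *     // the loop thread is woken via evthread_notify_base() above and
 *     // invokes wakeup_ev's callback with EV_READ in its 'what' argument
 */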
2269 void
2270 event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
2272 memset(cb, 0, sizeof(struct deferred_cb));
2273 cb->cb = fn;
2274 cb->arg = arg;
2277 void
2278 event_deferred_cb_cancel(struct deferred_cb_queue *queue,
2279 struct deferred_cb *cb)
2281 if (!queue) {
2282 if (current_base)
2283 queue = &current_base->defer_queue;
2284 else
2285 return;
2288 LOCK_DEFERRED_QUEUE(queue);
2289 if (cb->queued) {
2290 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
2291 --queue->active_count;
2292 cb->queued = 0;
2294 UNLOCK_DEFERRED_QUEUE(queue);
2297 void
2298 event_deferred_cb_schedule(struct deferred_cb_queue *queue,
2299 struct deferred_cb *cb)
2301 if (!queue) {
2302 if (current_base)
2303 queue = &current_base->defer_queue;
2304 else
2305 return;
2308 LOCK_DEFERRED_QUEUE(queue);
2309 if (!cb->queued) {
2310 cb->queued = 1;
2311 TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
2312 ++queue->active_count;
2313 if (queue->notify_fn)
2314 queue->notify_fn(queue, queue->notify_arg);
2316 UNLOCK_DEFERRED_QUEUE(queue);
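/*
 * Sketch (illustrative; not part of this file): the deferred-callback
 * helpers above are internal (declared in defer-internal.h) and are
 * used by bufferevents rather than by applications.  The pattern is:
 * initialize once, then schedule whenever the work should run inside
 * the event loop; passing a NULL queue falls back to current_base's
 * defer_queue, as the code above shows.  do_later and my_arg are
 * made-up names.
 *
 *     static void
 *     do_later(struct deferred_cb *cb, void *arg)
 *     {
 *             // runs later, from inside the event loop
 *     }
 *
 *     struct deferred_cb dcb;
 *     event_deferred_cb_init(&dcb, do_later, my_arg);
 *     event_deferred_cb_schedule(NULL, &dcb);    // queue on current_base
 *     // event_deferred_cb_cancel(NULL, &dcb);   // if it must not run after all
 */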
2319 static int
2320 timeout_next(struct event_base *base, struct timeval **tv_p)
2322 /* Caller must hold th_base_lock */
2323 struct timeval now;
2324 struct event *ev;
2325 struct timeval *tv = *tv_p;
2326 int res = 0;
2328 ev = min_heap_top(&base->timeheap);
2330 if (ev == NULL) {
2331 /* if no time-based events are active wait for I/O */
2332 *tv_p = NULL;
2333 goto out;
2336 if (gettime(base, &now) == -1) {
2337 res = -1;
2338 goto out;
2341 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
2342 evutil_timerclear(tv);
2343 goto out;
2346 evutil_timersub(&ev->ev_timeout, &now, tv);
2348 EVUTIL_ASSERT(tv->tv_sec >= 0);
2349 EVUTIL_ASSERT(tv->tv_usec >= 0);
2350 event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
2352 out:
2353 return (res);
2356 /*
2357 * Determines if the time is running backwards by comparing the current time
2358 * against the last time we checked. Not needed when using clock monotonic.
2359 * If time is running backwards, we adjust the firing time of every event by
2360 * the amount that time seems to have jumped.
2361 */
2362 static void
2363 timeout_correct(struct event_base *base, struct timeval *tv)
2365 /* Caller must hold th_base_lock. */
2366 struct event **pev;
2367 unsigned int size;
2368 struct timeval off;
2369 int i;
2371 if (use_monotonic)
2372 return;
2374 /* Check if time is running backwards */
2375 gettime(base, tv);
2377 if (evutil_timercmp(tv, &base->event_tv, >=)) {
2378 base->event_tv = *tv;
2379 return;
2382 event_debug(("%s: time is running backwards, corrected",
2383 __func__));
2384 evutil_timersub(&base->event_tv, tv, &off);
2386 /*
2387 * We can modify the key element of the node without destroying
2388 * the minheap property, because we change every element.
2389 */
2390 pev = base->timeheap.p;
2391 size = base->timeheap.n;
2392 for (; size-- > 0; ++pev) {
2393 struct timeval *ev_tv = &(**pev).ev_timeout;
2394 evutil_timersub(ev_tv, &off, ev_tv);
2396 for (i=0; i<base->n_common_timeouts; ++i) {
2397 struct event *ev;
2398 struct common_timeout_list *ctl =
2399 base->common_timeout_queues[i];
2400 TAILQ_FOREACH(ev, &ctl->events,
2401 ev_timeout_pos.ev_next_with_common_timeout) {
2402 struct timeval *ev_tv = &ev->ev_timeout;
2403 ev_tv->tv_usec &= MICROSECONDS_MASK;
2404 evutil_timersub(ev_tv, &off, ev_tv);
2405 ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
2406 (i<<COMMON_TIMEOUT_IDX_SHIFT);
2410 /* Now remember what the new time turned out to be. */
2411 base->event_tv = *tv;
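/*
 * Sketch (illustrative; not part of this file): a worked example of the
 * correction above.  If the wall clock jumps from 36000s (10:00:00) back
 * to 35700s (09:55:00), off is 300s, and a deadline that was "now + 5s"
 * before the jump is pulled back by the same amount, so it still fires
 * five seconds after the new "now".
 *
 *     struct timeval off      = { 300, 0 };      // event_tv - new time
 *     struct timeval deadline = { 36005, 0 };    // old now (36000s) + 5s
 *     evutil_timersub(&deadline, &off, &deadline);
 *     // deadline == { 35705, 0 } == new now (35700s) + 5s
 */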
2414 /* Activate every event whose timeout has elapsed. */
2415 static void
2416 timeout_process(struct event_base *base)
2418 /* Caller must hold lock. */
2419 struct timeval now;
2420 struct event *ev;
2422 if (min_heap_empty(&base->timeheap)) {
2423 return;
2426 gettime(base, &now);
2428 while ((ev = min_heap_top(&base->timeheap))) {
2429 if (evutil_timercmp(&ev->ev_timeout, &now, >))
2430 break;
2432 /* delete this event from the I/O queues */
2433 event_del_internal(ev);
2435 event_debug(("timeout_process: call %p",
2436 ev->ev_callback));
2437 event_active_nolock(ev, EV_TIMEOUT, 1);
2441 /* Remove 'ev' from 'queue' (EVLIST_...) in base. */
2442 static void
2443 event_queue_remove(struct event_base *base, struct event *ev, int queue)
2445 EVENT_BASE_ASSERT_LOCKED(base);
2447 if (!(ev->ev_flags & queue)) {
2448 event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
2449 ev, ev->ev_fd, queue);
2450 return;
2453 if (~ev->ev_flags & EVLIST_INTERNAL)
2454 base->event_count--;
2456 ev->ev_flags &= ~queue;
2457 switch (queue) {
2458 case EVLIST_INSERTED:
2459 TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
2460 break;
2461 case EVLIST_ACTIVE:
2462 base->event_count_active--;
2463 TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
2464 ev, ev_active_next);
2465 break;
2466 case EVLIST_TIMEOUT:
2467 if (is_common_timeout(&ev->ev_timeout, base)) {
2468 struct common_timeout_list *ctl =
2469 get_common_timeout_list(base, &ev->ev_timeout);
2470 TAILQ_REMOVE(&ctl->events, ev,
2471 ev_timeout_pos.ev_next_with_common_timeout);
2472 } else {
2473 min_heap_erase(&base->timeheap, ev);
2475 break;
2476 default:
2477 event_errx(1, "%s: unknown queue %x", __func__, queue);
2481 /* Add 'ev' to the common timeout list in 'ctl'. */
2482 static void
2483 insert_common_timeout_inorder(struct common_timeout_list *ctl,
2484 struct event *ev)
2486 struct event *e;
2487 /* By all logic, we should just be able to append 'ev' to the end of
2488 * ctl->events, since the timeout on each 'ev' is set to {the common
2489 * timeout} + {the time when we add the event}, and so the events
2490 * should arrive in order of their timeouts. But just in case
2491 * there's some wacky threading issue going on, we do a search from
2492 * the end of the list to find the right insertion point.
2493 */
2494 TAILQ_FOREACH_REVERSE(e, &ctl->events,
2495 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
2496 /* This timercmp is a little sneaky, since both ev and e have
2497 * magic values in tv_usec. Fortunately, they ought to have
2498 * the _same_ magic values in tv_usec. Let's assert for that.
2499 */
2500 EVUTIL_ASSERT(
2501 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
2502 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
2503 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
2504 ev_timeout_pos.ev_next_with_common_timeout);
2505 return;
2508 TAILQ_INSERT_HEAD(&ctl->events, ev,
2509 ev_timeout_pos.ev_next_with_common_timeout);
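/*
 * Usage sketch (illustrative; not part of this file): applications opt
 * in to this common-timeout path with event_base_init_common_timeout();
 * the returned timeval carries the magic bits that is_common_timeout()
 * checks, and can be reused for many events sharing one duration so
 * they land on a common_timeout_list instead of the min-heap.  ev1 and
 * ev2 are hypothetical events on 'base'.
 *
 *     struct timeval one_minute = { 60, 0 };
 *     const struct timeval *common =
 *         event_base_init_common_timeout(base, &one_minute);
 *     event_add(ev1, common);    // queued in timeout order on the list above
 *     event_add(ev2, common);
 */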
2512 static void
2513 event_queue_insert(struct event_base *base, struct event *ev, int queue)
2515 EVENT_BASE_ASSERT_LOCKED(base);
2517 if (ev->ev_flags & queue) {
2518 /* Double insertion is possible for active events */
2519 if (queue & EVLIST_ACTIVE)
2520 return;
2522 event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
2523 ev, ev->ev_fd, queue);
2524 return;
2527 if (~ev->ev_flags & EVLIST_INTERNAL)
2528 base->event_count++;
2530 ev->ev_flags |= queue;
2531 switch (queue) {
2532 case EVLIST_INSERTED:
2533 TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
2534 break;
2535 case EVLIST_ACTIVE:
2536 base->event_count_active++;
2537 TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
2538 ev,ev_active_next);
2539 break;
2540 case EVLIST_TIMEOUT: {
2541 if (is_common_timeout(&ev->ev_timeout, base)) {
2542 struct common_timeout_list *ctl =
2543 get_common_timeout_list(base, &ev->ev_timeout);
2544 insert_common_timeout_inorder(ctl, ev);
2545 } else
2546 min_heap_push(&base->timeheap, ev);
2547 break;
2549 default:
2550 event_errx(1, "%s: unknown queue %x", __func__, queue);
2554 /* Functions for debugging */
2556 const char *
2557 event_get_version(void)
2559 return (_EVENT_VERSION);
2562 ev_uint32_t
2563 event_get_version_number(void)
2565 return (_EVENT_NUMERIC_VERSION);
2568 /*
2569 * No thread-safe interface needed - the information should be the same
2570 * for all threads.
2571 */
2573 const char *
2574 event_get_method(void)
2576 return (current_base->evsel->name);
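/*
 * Usage sketch (illustrative; not part of this file): reporting the
 * library version and the backend in use.  event_base_get_method() is
 * the per-base variant of event_get_method() above.
 *
 *     printf("libevent %s (0x%08x), backend %s\n",
 *         event_get_version(),
 *         (unsigned)event_get_version_number(),
 *         event_base_get_method(base));
 */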
2579 #ifndef _EVENT_DISABLE_MM_REPLACEMENT
2580 static void *(*_mm_malloc_fn)(size_t sz) = NULL;
2581 static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
2582 static void (*_mm_free_fn)(void *p) = NULL;
2584 void *
2585 event_mm_malloc_(size_t sz)
2587 if (_mm_malloc_fn)
2588 return _mm_malloc_fn(sz);
2589 else
2590 return malloc(sz);
2593 void *
2594 event_mm_calloc_(size_t count, size_t size)
2596 if (_mm_malloc_fn) {
2597 size_t sz = count * size;
2598 void *p = _mm_malloc_fn(sz);
2599 if (p)
2600 memset(p, 0, sz);
2601 return p;
2602 } else
2603 return calloc(count, size);
2606 char *
2607 event_mm_strdup_(const char *str)
2609 if (_mm_malloc_fn) {
2610 size_t ln = strlen(str);
2611 void *p = _mm_malloc_fn(ln+1);
2612 if (p)
2613 memcpy(p, str, ln+1);
2614 return p;
2615 } else
2616 #ifdef WIN32
2617 return _strdup(str);
2618 #else
2619 return strdup(str);
2620 #endif
2623 void *
2624 event_mm_realloc_(void *ptr, size_t sz)
2626 if (_mm_realloc_fn)
2627 return _mm_realloc_fn(ptr, sz);
2628 else
2629 return realloc(ptr, sz);
2632 void
2633 event_mm_free_(void *ptr)
2635 if (_mm_free_fn)
2636 _mm_free_fn(ptr);
2637 else
2638 free(ptr);
2641 void
2642 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
2643 void *(*realloc_fn)(void *ptr, size_t sz),
2644 void (*free_fn)(void *ptr))
2646 _mm_malloc_fn = malloc_fn;
2647 _mm_realloc_fn = realloc_fn;
2648 _mm_free_fn = free_fn;
2650 #endif
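/*
 * Usage sketch (illustrative; not part of this file): replacing the
 * allocator before any other libevent call, e.g. to add accounting.
 * xmalloc/xrealloc/xfree are hypothetical wrappers that must have the
 * same signatures and semantics as malloc/realloc/free.
 *
 *     event_set_mem_functions(xmalloc, xrealloc, xfree);
 *     struct event_base *base = event_base_new();   // allocated via xmalloc
 */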
2652 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2653 static void
2654 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
2656 ev_uint64_t msg;
2657 ev_ssize_t r;
2658 struct event_base *base = arg;
2660 r = read(fd, (void*) &msg, sizeof(msg));
2661 if (r<0 && errno != EAGAIN) {
2662 event_sock_warn(fd, "Error reading from eventfd");
2664 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2665 base->is_notify_pending = 0;
2666 EVBASE_RELEASE_LOCK(base, th_base_lock);
2668 #endif
2670 static void
2671 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
2673 unsigned char buf[1024];
2674 struct event_base *base = arg;
2675 #ifdef WIN32
2676 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
2677 ;
2678 #else
2679 while (read(fd, (char*)buf, sizeof(buf)) > 0)
2680 ;
2681 #endif
2683 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2684 base->is_notify_pending = 0;
2685 EVBASE_RELEASE_LOCK(base, th_base_lock);
2689 evthread_make_base_notifiable(struct event_base *base)
2691 void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
2692 int (*notify)(struct event_base *) = evthread_notify_base_default;
2694 /* XXXX grab the lock here? */
2695 if (!base)
2696 return -1;
2698 if (base->th_notify_fd[0] >= 0)
2699 return 0;
2701 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2702 #ifndef EFD_CLOEXEC
2703 #define EFD_CLOEXEC 0
2704 #endif
2705 base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
2706 if (base->th_notify_fd[0] >= 0) {
2707 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2708 notify = evthread_notify_base_eventfd;
2709 cb = evthread_notify_drain_eventfd;
2711 #endif
2712 #if defined(_EVENT_HAVE_PIPE)
2713 if (base->th_notify_fd[0] < 0) {
2714 if ((base->evsel->features & EV_FEATURE_FDS)) {
2715 if (pipe(base->th_notify_fd) < 0) {
2716 event_warn("%s: pipe", __func__);
2717 } else {
2718 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2719 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2723 #endif
2725 #ifdef WIN32
2726 #define LOCAL_SOCKETPAIR_AF AF_INET
2727 #else
2728 #define LOCAL_SOCKETPAIR_AF AF_UNIX
2729 #endif
2730 if (base->th_notify_fd[0] < 0) {
2731 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
2732 base->th_notify_fd) == -1) {
2733 event_sock_warn(-1, "%s: socketpair", __func__);
2734 return (-1);
2735 } else {
2736 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2737 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2741 evutil_make_socket_nonblocking(base->th_notify_fd[0]);
2743 base->th_notify_fn = notify;
2745 /*
2746 Making the second socket nonblocking is a bit subtle, given that we
2747 ignore any EAGAIN returns when writing to it, and you don't usually
2748 do that for a nonblocking socket. But if the kernel gives us EAGAIN,
2749 then there's no need to add any more data to the buffer, since
2750 the main thread is already either about to wake up and drain it,
2751 or woken up and in the process of draining it.
2752 */
2753 if (base->th_notify_fd[1] > 0)
2754 evutil_make_socket_nonblocking(base->th_notify_fd[1]);
2756 /* prepare an event that we can use for wakeup */
2757 event_assign(&base->th_notify, base, base->th_notify_fd[0],
2758 EV_READ|EV_PERSIST, cb, base);
2760 /* we need to mark this as an internal event */
2761 base->th_notify.ev_flags |= EVLIST_INTERNAL;
2762 event_priority_set(&base->th_notify, 0);
2764 return event_add(&base->th_notify, NULL);
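/*
 * Usage sketch (illustrative; not part of this file): applications
 * normally do not call evthread_make_base_notifiable() themselves.
 * Installing the threading functions before creating the base makes
 * event_base_new() set this up automatically, after which event_add()
 * and event_active() from other threads wake the loop.  Requires
 * linking with -levent_pthreads.
 *
 *     #include <event2/thread.h>
 *
 *     evthread_use_pthreads();                    // before event_base_new()
 *     struct event_base *base = event_base_new(); // base is now notifiable
 */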
2767 void
2768 event_base_dump_events(struct event_base *base, FILE *output)
2770 struct event *e;
2771 int i;
2772 fprintf(output, "Inserted events:\n");
2773 TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
2774 fprintf(output, " %p [fd %ld]%s%s%s%s%s\n",
2775 (void*)e, (long)e->ev_fd,
2776 (e->ev_events&EV_READ)?" Read":"",
2777 (e->ev_events&EV_WRITE)?" Write":"",
2778 (e->ev_events&EV_SIGNAL)?" Signal":"",
2779 (e->ev_events&EV_TIMEOUT)?" Timeout":"",
2780 (e->ev_events&EV_PERSIST)?" Persist":"");
2783 for (i = 0; i < base->nactivequeues; ++i) {
2784 if (TAILQ_EMPTY(&base->activequeues[i]))
2785 continue;
2786 fprintf(output, "Active events [priority %d]:\n", i);
2787 TAILQ_FOREACH(e, &base->activequeues[i], ev_active_next) {
2788 fprintf(output, " %p [fd %ld]%s%s%s%s\n",
2789 (void*)e, (long)e->ev_fd,
2790 (e->ev_res&EV_READ)?" Read active":"",
2791 (e->ev_res&EV_WRITE)?" Write active":"",
2792 (e->ev_res&EV_SIGNAL)?" Signal active":"",
2793 (e->ev_res&EV_TIMEOUT)?" Timeout active":"");
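/*
 * Usage sketch (illustrative; not part of this file): dumping the
 * inserted and active events to stderr while debugging a base.
 *
 *     event_base_dump_events(base, stderr);
 */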
2798 void
2799 event_base_add_virtual(struct event_base *base)
2801 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2802 base->virtual_event_count++;
2803 EVBASE_RELEASE_LOCK(base, th_base_lock);
2806 void
2807 event_base_del_virtual(struct event_base *base)
2809 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2810 EVUTIL_ASSERT(base->virtual_event_count > 0);
2811 base->virtual_event_count--;
2812 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
2813 evthread_notify_base(base);
2814 EVBASE_RELEASE_LOCK(base, th_base_lock);