transmission: update from 2.13 to 2.22
[tomato.git] / release / src / router / libevent / event.c
1 /*
2 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "event2/event-config.h"
29 #ifdef WIN32
30 #include <winsock2.h>
31 #define WIN32_LEAN_AND_MEAN
32 #include <windows.h>
33 #undef WIN32_LEAN_AND_MEAN
34 #endif
35 #include <sys/types.h>
36 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
37 #include <sys/time.h>
38 #endif
39 #include <sys/queue.h>
40 #ifdef _EVENT_HAVE_SYS_SOCKET_H
41 #include <sys/socket.h>
42 #endif
43 #include <stdio.h>
44 #include <stdlib.h>
45 #ifdef _EVENT_HAVE_UNISTD_H
46 #include <unistd.h>
47 #endif
48 #ifdef _EVENT_HAVE_SYS_EVENTFD_H
49 #include <sys/eventfd.h>
50 #endif
51 #include <ctype.h>
52 #include <errno.h>
53 #include <signal.h>
54 #include <string.h>
55 #include <time.h>
57 #include "event2/event.h"
58 #include "event2/event_struct.h"
59 #include "event2/event_compat.h"
60 #include "event-internal.h"
61 #include "defer-internal.h"
62 #include "evthread-internal.h"
63 #include "event2/thread.h"
64 #include "event2/util.h"
65 #include "log-internal.h"
66 #include "evmap-internal.h"
67 #include "iocp-internal.h"
68 #include "changelist-internal.h"
69 #include "ht-internal.h"
70 #include "util-internal.h"
72 #ifdef _EVENT_HAVE_EVENT_PORTS
73 extern const struct eventop evportops;
74 #endif
75 #ifdef _EVENT_HAVE_SELECT
76 extern const struct eventop selectops;
77 #endif
78 #ifdef _EVENT_HAVE_POLL
79 extern const struct eventop pollops;
80 #endif
81 #ifdef _EVENT_HAVE_EPOLL
82 extern const struct eventop epollops;
83 #endif
84 #ifdef _EVENT_HAVE_WORKING_KQUEUE
85 extern const struct eventop kqops;
86 #endif
87 #ifdef _EVENT_HAVE_DEVPOLL
88 extern const struct eventop devpollops;
89 #endif
90 #ifdef WIN32
91 extern const struct eventop win32ops;
92 #endif
94 /* Array of backends in order of preference. */
95 static const struct eventop *eventops[] = {
96 #ifdef _EVENT_HAVE_EVENT_PORTS
97 &evportops,
98 #endif
99 #ifdef _EVENT_HAVE_WORKING_KQUEUE
100 &kqops,
101 #endif
102 #ifdef _EVENT_HAVE_EPOLL
103 &epollops,
104 #endif
105 #ifdef _EVENT_HAVE_DEVPOLL
106 &devpollops,
107 #endif
108 #ifdef _EVENT_HAVE_POLL
109 &pollops,
110 #endif
111 #ifdef _EVENT_HAVE_SELECT
112 &selectops,
113 #endif
114 #ifdef WIN32
115 &win32ops,
116 #endif
117 NULL
120 /* Global state; deprecated */
121 struct event_base *event_global_current_base_ = NULL;
122 #define current_base event_global_current_base_
124 /* Global state */
126 static int use_monotonic;
128 /* Prototypes */
129 static inline int event_add_internal(struct event *ev,
130 const struct timeval *tv, int tv_is_absolute);
131 static inline int event_del_internal(struct event *ev);
133 static void event_queue_insert(struct event_base *, struct event *, int);
134 static void event_queue_remove(struct event_base *, struct event *, int);
135 static int event_haveevents(struct event_base *);
137 static int event_process_active(struct event_base *);
139 static int timeout_next(struct event_base *, struct timeval **);
140 static void timeout_process(struct event_base *);
141 static void timeout_correct(struct event_base *, struct timeval *);
143 static inline void event_signal_closure(struct event_base *, struct event *ev);
144 static inline void event_persist_closure(struct event_base *, struct event *ev);
146 static int evthread_notify_base(struct event_base *base);
148 #ifndef _EVENT_DISABLE_DEBUG_MODE
149 /* These functions implement a hashtable of which 'struct event *' structures
150 * have been setup or added. We don't want to trust the content of the struct
151 * event itself, since we're trying to work through cases where an event gets
152 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
155 struct event_debug_entry {
156 HT_ENTRY(event_debug_entry) node;
157 const struct event *ptr;
158 unsigned added : 1;
161 static inline unsigned
162 hash_debug_entry(const struct event_debug_entry *e)
164 /* We need to do this silliness to convince compilers that we
165 * honestly mean to cast e->ptr to an integer, and discard any
166 * part of it that doesn't fit in an unsigned.
168 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
169 /* Our hashtable implementation is pretty sensitive to low bits,
170 * and every struct event is over 64 bytes in size, so we can
171 * just say >>6. */
172 return (u >> 6);
175 static inline int
176 eq_debug_entry(const struct event_debug_entry *a,
177 const struct event_debug_entry *b)
179 return a->ptr == b->ptr;
182 int _event_debug_mode_on = 0;
183 /* Set if it's too late to enable event_debug_mode. */
184 static int event_debug_mode_too_late = 0;
185 static void *_event_debug_map_lock = NULL;
186 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
187 HT_INITIALIZER();
189 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
190 eq_debug_entry)
191 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
192 eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
194 /* Macro: record that ev is now setup (that is, ready for an add) */
195 #define _event_debug_note_setup(ev) do { \
196 if (_event_debug_mode_on) { \
197 struct event_debug_entry *dent,find; \
198 find.ptr = (ev); \
199 EVLOCK_LOCK(_event_debug_map_lock, 0); \
200 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
201 if (dent) { \
202 dent->added = 0; \
203 } else { \
204 dent = mm_malloc(sizeof(*dent)); \
205 if (!dent) \
206 event_err(1, \
207 "Out of memory in debugging code"); \
208 dent->ptr = (ev); \
209 dent->added = 0; \
210 HT_INSERT(event_debug_map, &global_debug_map, dent); \
212 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
214 event_debug_mode_too_late = 1; \
215 } while (0)
216 /* Macro: record that ev is no longer setup */
217 #define _event_debug_note_teardown(ev) do { \
218 if (_event_debug_mode_on) { \
219 struct event_debug_entry *dent,find; \
220 find.ptr = (ev); \
221 EVLOCK_LOCK(_event_debug_map_lock, 0); \
222 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
223 if (dent) \
224 mm_free(dent); \
225 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
227 event_debug_mode_too_late = 1; \
228 } while (0)
229 /* Macro: record that ev is now added */
230 #define _event_debug_note_add(ev) do { \
231 if (_event_debug_mode_on) { \
232 struct event_debug_entry *dent,find; \
233 find.ptr = (ev); \
234 EVLOCK_LOCK(_event_debug_map_lock, 0); \
235 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
236 if (dent) { \
237 dent->added = 1; \
238 } else { \
239 event_errx(_EVENT_ERR_ABORT, \
240 "%s: noting an add on a non-setup event %p", \
241 __func__, (ev)); \
243 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
245 event_debug_mode_too_late = 1; \
246 } while (0)
247 /* Macro: record that ev is no longer added */
248 #define _event_debug_note_del(ev) do { \
249 if (_event_debug_mode_on) { \
250 struct event_debug_entry *dent,find; \
251 find.ptr = (ev); \
252 EVLOCK_LOCK(_event_debug_map_lock, 0); \
253 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
254 if (dent) { \
255 dent->added = 0; \
256 } else { \
257 event_errx(_EVENT_ERR_ABORT, \
258 "%s: noting a del on a non-setup event %p", \
259 __func__, (ev)); \
261 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
263 event_debug_mode_too_late = 1; \
264 } while (0)
265 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
266 #define _event_debug_assert_is_setup(ev) do { \
267 if (_event_debug_mode_on) { \
268 struct event_debug_entry *dent,find; \
269 find.ptr = (ev); \
270 EVLOCK_LOCK(_event_debug_map_lock, 0); \
271 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
272 if (!dent) { \
273 event_errx(_EVENT_ERR_ABORT, \
274 "%s called on a non-initialized event %p", \
275 __func__, (ev)); \
277 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
279 } while (0)
280 /* Macro: assert that ev is not added (i.e., okay to tear down or set
281 * up again) */
282 #define _event_debug_assert_not_added(ev) do { \
283 if (_event_debug_mode_on) { \
284 struct event_debug_entry *dent,find; \
285 find.ptr = (ev); \
286 EVLOCK_LOCK(_event_debug_map_lock, 0); \
287 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
288 if (dent && dent->added) { \
289 event_errx(_EVENT_ERR_ABORT, \
290 "%s called on an already added event %p", \
291 __func__, (ev)); \
293 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
295 } while (0)
296 #else
297 #define _event_debug_note_setup(ev) \
298 ((void)0)
299 #define _event_debug_note_teardown(ev) \
300 ((void)0)
301 #define _event_debug_note_add(ev) \
302 ((void)0)
303 #define _event_debug_note_del(ev) \
304 ((void)0)
305 #define _event_debug_assert_is_setup(ev) \
306 ((void)0)
307 #define _event_debug_assert_not_added(ev) \
308 ((void)0)
309 #endif
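/*
 * Usage sketch (illustrative only, not part of libevent itself): the debug
 * map above is only populated once an application opts in with
 * event_enable_debug_mode(), which has to happen before any event or
 * event_base exists.  The function name below is invented for the example.
 */
#include <event2/event.h>

static struct event_base *
example_new_base_with_debug_checks(void)
{
	/* Opt in first; doing this after an event_base already exists makes
	 * libevent abort with the "too late" error reported below. */
	event_enable_debug_mode();

	/* From here on, misuse such as adding a non-setup event is caught
	 * by the _event_debug_* checks above. */
	return event_base_new();
}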
311 #define EVENT_BASE_ASSERT_LOCKED(base) \
312 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
314 /* The first time this function is called, it sets use_monotonic to 1
315 * if we have a clock function that supports monotonic time */
316 static void
317 detect_monotonic(void)
319 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
320 struct timespec ts;
321 static int use_monotonic_initialized = 0;
323 if (use_monotonic_initialized)
324 return;
326 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
327 use_monotonic = 1;
329 use_monotonic_initialized = 1;
330 #endif
333 /** Set 'tp' to the current time according to 'base'. We must hold the lock
334 * on 'base'. If there is a cached time, return it. Otherwise, use
335 * clock_gettime or gettimeofday as appropriate to find out the right time.
336 * Return 0 on success, -1 on failure.
338 static int
339 gettime(struct event_base *base, struct timeval *tp)
341 EVENT_BASE_ASSERT_LOCKED(base);
343 if (base->tv_cache.tv_sec) {
344 *tp = base->tv_cache;
345 return (0);
348 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
349 if (use_monotonic) {
350 struct timespec ts;
352 if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
353 return (-1);
355 tp->tv_sec = ts.tv_sec;
356 tp->tv_usec = ts.tv_nsec / 1000;
357 return (0);
359 #endif
361 return (evutil_gettimeofday(tp, NULL));
365 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
367 int r;
368 if (!base) {
369 base = current_base;
370 if (!current_base)
371 return evutil_gettimeofday(tv, NULL);
374 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
375 r = gettime(base, tv);
376 EVBASE_RELEASE_LOCK(base, th_base_lock);
377 return r;
380 /** Make 'base' have no current cached time. */
381 static inline void
382 clear_time_cache(struct event_base *base)
384 base->tv_cache.tv_sec = 0;
387 /** Replace the cached time in 'base' with the current time. */
388 static inline void
389 update_time_cache(struct event_base *base)
391 base->tv_cache.tv_sec = 0;
392 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
393 gettime(base, &base->tv_cache);
396 struct event_base *
397 event_init(void)
399 struct event_base *base = event_base_new_with_config(NULL);
401 if (base == NULL) {
402 event_errx(1, "%s: Unable to construct event_base", __func__);
403 return NULL;
406 current_base = base;
408 return (base);
411 struct event_base *
412 event_base_new(void)
414 struct event_base *base = NULL;
415 struct event_config *cfg = event_config_new();
416 if (cfg) {
417 base = event_base_new_with_config(cfg);
418 event_config_free(cfg);
420 return base;
423 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
424 * avoid. */
425 static int
426 event_config_is_avoided_method(const struct event_config *cfg,
427 const char *method)
429 struct event_config_entry *entry;
431 TAILQ_FOREACH(entry, &cfg->entries, next) {
432 if (entry->avoid_method != NULL &&
433 strcmp(entry->avoid_method, method) == 0)
434 return (1);
437 return (0);
440 /** Return true iff 'method' is disabled according to the environment. */
441 static int
442 event_is_method_disabled(const char *name)
444 char environment[64];
445 int i;
447 evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
448 for (i = 8; environment[i] != '\0'; ++i)
449 environment[i] = EVUTIL_TOUPPER(environment[i]);
450 /* Note that evutil_getenv() ignores the environment entirely if
451 * we're setuid */
452 return (evutil_getenv(environment) != NULL);
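/*
 * Illustrative sketch (not part of this file): because of the check above,
 * any backend can be vetoed from the environment, e.g. EVENT_NOEPOLL=1
 * disables the "epoll" backend.  setenv() is plain POSIX; the helper name
 * is invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>
#include <event2/event.h>

static void
example_disable_epoll_via_env(void)
{
	struct event_base *base;

	/* Must be set before the event_base is created; ignored entirely if
	 * the process is setuid (see the evutil_getenv() note above). */
	setenv("EVENT_NOEPOLL", "1", 1);

	base = event_base_new();
	if (base) {
		printf("backend chosen: %s\n", event_base_get_method(base));
		event_base_free(base);
	}
}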
456 event_base_get_features(const struct event_base *base)
458 return base->evsel->features;
461 void
462 event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
464 memset(cb, 0, sizeof(struct deferred_cb_queue));
465 TAILQ_INIT(&cb->deferred_cb_list);
468 /** Helper for the deferred_cb queue: wake up the event base. */
469 static void
470 notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
472 struct event_base *base = baseptr;
473 if (EVBASE_NEED_NOTIFY(base))
474 evthread_notify_base(base);
477 struct deferred_cb_queue *
478 event_base_get_deferred_cb_queue(struct event_base *base)
480 return base ? &base->defer_queue : NULL;
483 void
484 event_enable_debug_mode(void)
486 #ifndef _EVENT_DISABLE_DEBUG_MODE
487 if (_event_debug_mode_on)
488 event_errx(1, "%s was called twice!", __func__);
489 if (event_debug_mode_too_late)
490 event_errx(1, "%s must be called *before* creating any events "
491 "or event_bases",__func__);
493 _event_debug_mode_on = 1;
495 HT_INIT(event_debug_map, &global_debug_map);
497 EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
498 #endif
501 #if 0
502 void
503 event_disable_debug_mode(void)
505 struct event_debug_entry **ent, *victim;
507 EVLOCK_LOCK(_event_debug_map_lock, 0);
508 for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
509 victim = *ent;
510 ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
511 mm_free(victim);
513 HT_CLEAR(event_debug_map, &global_debug_map);
514 EVLOCK_UNLOCK(_event_debug_map_lock , 0);
516 #endif
518 struct event_base *
519 event_base_new_with_config(const struct event_config *cfg)
521 int i;
522 struct event_base *base;
523 int should_check_environment;
525 #ifndef _EVENT_DISABLE_DEBUG_MODE
526 event_debug_mode_too_late = 1;
527 if (_event_debug_mode_on && !_event_debug_map_lock) {
528 EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
530 #endif
532 if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
533 event_warn("%s: calloc", __func__);
534 return NULL;
536 detect_monotonic();
537 gettime(base, &base->event_tv);
539 min_heap_ctor(&base->timeheap);
540 TAILQ_INIT(&base->eventqueue);
541 base->sig.ev_signal_pair[0] = -1;
542 base->sig.ev_signal_pair[1] = -1;
543 base->th_notify_fd[0] = -1;
544 base->th_notify_fd[1] = -1;
546 event_deferred_cb_queue_init(&base->defer_queue);
547 base->defer_queue.notify_fn = notify_base_cbq_callback;
548 base->defer_queue.notify_arg = base;
549 if (cfg)
550 base->flags = cfg->flags;
552 evmap_io_initmap(&base->io);
553 evmap_signal_initmap(&base->sigmap);
554 event_changelist_init(&base->changelist);
556 base->evbase = NULL;
558 should_check_environment =
559 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
561 for (i = 0; eventops[i] && !base->evbase; i++) {
562 if (cfg != NULL) {
563 /* determine if this backend should be avoided */
564 if (event_config_is_avoided_method(cfg,
565 eventops[i]->name))
566 continue;
567 if ((eventops[i]->features & cfg->require_features)
568 != cfg->require_features)
569 continue;
572 /* also obey the environment variables */
573 if (should_check_environment &&
574 event_is_method_disabled(eventops[i]->name))
575 continue;
577 base->evsel = eventops[i];
579 base->evbase = base->evsel->init(base);
582 if (base->evbase == NULL) {
583 event_warnx("%s: no event mechanism available",
584 __func__);
585 event_base_free(base);
586 return NULL;
589 if (evutil_getenv("EVENT_SHOW_METHOD"))
590 event_msgx("libevent using: %s", base->evsel->name);
592 /* allocate a single active event queue */
593 if (event_base_priority_init(base, 1) < 0) {
594 event_base_free(base);
595 return NULL;
598 /* prepare for threading */
600 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
601 if (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK)) {
602 int r;
603 EVTHREAD_ALLOC_LOCK(base->th_base_lock,
604 EVTHREAD_LOCKTYPE_RECURSIVE);
605 base->defer_queue.lock = base->th_base_lock;
606 EVTHREAD_ALLOC_COND(base->current_event_cond);
607 r = evthread_make_base_notifiable(base);
608 if (r<0) {
609 event_base_free(base);
610 return NULL;
613 #endif
615 #ifdef WIN32
616 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
617 event_base_start_iocp(base, cfg->n_cpus_hint);
618 #endif
620 return (base);
624 event_base_start_iocp(struct event_base *base, int n_cpus)
626 #ifdef WIN32
627 if (base->iocp)
628 return 0;
629 base->iocp = event_iocp_port_launch(n_cpus);
630 if (!base->iocp) {
631 event_warnx("%s: Couldn't launch IOCP", __func__);
632 return -1;
634 return 0;
635 #else
636 return -1;
637 #endif
640 void
641 event_base_stop_iocp(struct event_base *base)
643 #ifdef WIN32
644 int rv;
646 if (!base->iocp)
647 return;
648 rv = event_iocp_shutdown(base->iocp, -1);
649 EVUTIL_ASSERT(rv >= 0);
650 base->iocp = NULL;
651 #endif
654 void
655 event_base_free(struct event_base *base)
657 int i, n_deleted=0;
658 struct event *ev;
659 /* XXXX grab the lock? If there is contention when one thread frees
660 * the base, then the contending thread will be very sad soon. */
662 if (base == NULL && current_base)
663 base = current_base;
664 if (base == current_base)
665 current_base = NULL;
667 /* XXX(niels) - check for internal events first */
668 EVUTIL_ASSERT(base);
670 #ifdef WIN32
671 event_base_stop_iocp(base);
672 #endif
674 /* threading fds if we have them */
675 if (base->th_notify_fd[0] != -1) {
676 event_del(&base->th_notify);
677 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
678 if (base->th_notify_fd[1] != -1)
679 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
680 base->th_notify_fd[0] = -1;
681 base->th_notify_fd[1] = -1;
682 event_debug_unassign(&base->th_notify);
685 /* Delete all non-internal events. */
686 for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
687 struct event *next = TAILQ_NEXT(ev, ev_next);
688 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
689 event_del(ev);
690 ++n_deleted;
692 ev = next;
694 while ((ev = min_heap_top(&base->timeheap)) != NULL) {
695 event_del(ev);
696 ++n_deleted;
698 for (i = 0; i < base->n_common_timeouts; ++i) {
699 struct common_timeout_list *ctl =
700 base->common_timeout_queues[i];
701 event_del(&ctl->timeout_event); /* Internal; doesn't count */
702 event_debug_unassign(&ctl->timeout_event);
703 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
704 struct event *next = TAILQ_NEXT(ev,
705 ev_timeout_pos.ev_next_with_common_timeout);
706 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
707 event_del(ev);
708 ++n_deleted;
710 ev = next;
712 mm_free(ctl);
714 if (base->common_timeout_queues)
715 mm_free(base->common_timeout_queues);
717 for (i = 0; i < base->nactivequeues; ++i) {
718 for (ev = TAILQ_FIRST(&base->activequeues[i]); ev; ) {
719 struct event *next = TAILQ_NEXT(ev, ev_active_next);
720 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
721 event_del(ev);
722 ++n_deleted;
724 ev = next;
728 if (n_deleted)
729 event_debug(("%s: %d events were still set in base",
730 __func__, n_deleted));
732 if (base->evsel != NULL && base->evsel->dealloc != NULL)
733 base->evsel->dealloc(base);
735 for (i = 0; i < base->nactivequeues; ++i)
736 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
738 EVUTIL_ASSERT(min_heap_empty(&base->timeheap));
739 min_heap_dtor(&base->timeheap);
741 mm_free(base->activequeues);
743 EVUTIL_ASSERT(TAILQ_EMPTY(&base->eventqueue));
745 evmap_io_clear(&base->io);
746 evmap_signal_clear(&base->sigmap);
747 event_changelist_freemem(&base->changelist);
749 EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
750 EVTHREAD_FREE_COND(base->current_event_cond);
752 mm_free(base);
755 /* reinitialize the event base after a fork */
757 event_reinit(struct event_base *base)
759 const struct eventop *evsel;
760 int res = 0;
761 struct event *ev;
762 int was_notifiable = 0;
764 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
766 evsel = base->evsel;
768 #if 0
769 /* Right now, reinit always takes effect, since even if the
770 backend doesn't require it, the signal socketpair code does.
774 /* check if this event mechanism requires reinit */
775 if (!evsel->need_reinit)
776 goto done;
777 #endif
779 /* prevent internal delete */
780 if (base->sig.ev_signal_added) {
781 /* we cannot call event_del here because the base has
782 * not been reinitialized yet. */
783 event_queue_remove(base, &base->sig.ev_signal,
784 EVLIST_INSERTED);
785 if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
786 event_queue_remove(base, &base->sig.ev_signal,
787 EVLIST_ACTIVE);
788 base->sig.ev_signal_added = 0;
790 if (base->th_notify_fd[0] != -1) {
791 /* we cannot call event_del here because the base has
792 * not been reinitialized yet. */
793 was_notifiable = 1;
794 event_queue_remove(base, &base->th_notify,
795 EVLIST_INSERTED);
796 if (base->th_notify.ev_flags & EVLIST_ACTIVE)
797 event_queue_remove(base, &base->th_notify,
798 EVLIST_ACTIVE);
799 base->sig.ev_signal_added = 0;
800 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
801 if (base->th_notify_fd[1] != -1)
802 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
803 base->th_notify_fd[0] = -1;
804 base->th_notify_fd[1] = -1;
805 event_debug_unassign(&base->th_notify);
808 if (base->evsel->dealloc != NULL)
809 base->evsel->dealloc(base);
810 base->evbase = evsel->init(base);
811 if (base->evbase == NULL) {
812 event_errx(1, "%s: could not reinitialize event mechanism",
813 __func__);
814 res = -1;
815 goto done;
818 event_changelist_freemem(&base->changelist); /* XXX */
819 evmap_io_clear(&base->io);
820 evmap_signal_clear(&base->sigmap);
822 TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
823 if (ev->ev_events & (EV_READ|EV_WRITE)) {
824 if (evmap_io_add(base, ev->ev_fd, ev) == -1)
825 res = -1;
826 } else if (ev->ev_events & EV_SIGNAL) {
827 if (evmap_signal_add(base, (int)ev->ev_fd, ev) == -1)
828 res = -1;
832 if (was_notifiable && res == 0)
833 res = evthread_make_base_notifiable(base);
835 done:
836 EVBASE_RELEASE_LOCK(base, th_base_lock);
837 return (res);
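/*
 * Usage sketch (illustrative only): event_reinit() above is meant to be
 * called in the child after fork(), since backends such as epoll/kqueue
 * and the internal signal socketpair do not survive a fork cleanly.
 * fork() is plain POSIX; the wrapper name is invented for the example.
 */
#include <sys/types.h>
#include <unistd.h>
#include <event2/event.h>

static pid_t
example_fork_with_base(struct event_base *base)
{
	pid_t pid = fork();

	if (pid == 0) {
		/* Child: rebuild backend state before touching the base. */
		if (event_reinit(base) == -1) {
			/* Could not reinitialize; treat the base as unusable. */
		}
	}
	return pid;
}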
840 const char **
841 event_get_supported_methods(void)
843 static const char **methods = NULL;
844 const struct eventop **method;
845 const char **tmp;
846 int i = 0, k;
848 /* count all methods */
849 for (method = &eventops[0]; *method != NULL; ++method) {
850 ++i;
853 /* allocate one more than we need for the NULL pointer */
854 tmp = mm_calloc((i + 1), sizeof(char *));
855 if (tmp == NULL)
856 return (NULL);
858 /* populate the array with the supported methods */
859 for (k = 0, i = 0; eventops[k] != NULL; ++k) {
860 tmp[i++] = eventops[k]->name;
862 tmp[i] = NULL;
864 if (methods != NULL)
865 mm_free((char**)methods);
867 methods = tmp;
869 return (methods);
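/*
 * Illustrative sketch: the NULL-terminated array returned above can be
 * walked to see which backends this build of libevent was compiled with.
 * The function name is invented for the example.
 */
#include <stdio.h>
#include <event2/event.h>

static void
example_list_supported_methods(void)
{
	const char **methods = event_get_supported_methods();
	int i;

	for (i = 0; methods && methods[i] != NULL; ++i)
		printf("supported backend: %s\n", methods[i]);
}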
872 struct event_config *
873 event_config_new(void)
875 struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
877 if (cfg == NULL)
878 return (NULL);
880 TAILQ_INIT(&cfg->entries);
882 return (cfg);
885 static void
886 event_config_entry_free(struct event_config_entry *entry)
888 if (entry->avoid_method != NULL)
889 mm_free((char *)entry->avoid_method);
890 mm_free(entry);
893 void
894 event_config_free(struct event_config *cfg)
896 struct event_config_entry *entry;
898 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
899 TAILQ_REMOVE(&cfg->entries, entry, next);
900 event_config_entry_free(entry);
902 mm_free(cfg);
906 event_config_set_flag(struct event_config *cfg, int flag)
908 if (!cfg)
909 return -1;
910 cfg->flags |= flag;
911 return 0;
915 event_config_avoid_method(struct event_config *cfg, const char *method)
917 struct event_config_entry *entry = mm_malloc(sizeof(*entry));
918 if (entry == NULL)
919 return (-1);
921 if ((entry->avoid_method = mm_strdup(method)) == NULL) {
922 mm_free(entry);
923 return (-1);
926 TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
928 return (0);
932 event_config_require_features(struct event_config *cfg,
933 int features)
935 if (!cfg)
936 return (-1);
937 cfg->require_features = features;
938 return (0);
942 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
944 if (!cfg)
945 return (-1);
946 cfg->n_cpus_hint = cpus;
947 return (0);
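/*
 * Usage sketch (illustrative only): how an application might drive the
 * event_config_* setters above and hand the result to
 * event_base_new_with_config().  The avoided method, required feature and
 * CPU hint are arbitrary choices for the example.
 */
#include <event2/event.h>

static struct event_base *
example_make_configured_base(void)
{
	struct event_base *base;
	struct event_config *cfg = event_config_new();

	if (!cfg)
		return NULL;
	event_config_avoid_method(cfg, "select");	/* never pick select */
	event_config_require_features(cfg, EV_FEATURE_O1); /* want an O(1) backend */
	event_config_set_num_cpus_hint(cfg, 4);	/* only used by IOCP on Windows */

	base = event_base_new_with_config(cfg);	/* NULL if nothing qualifies */
	event_config_free(cfg);
	return base;
}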
951 event_priority_init(int npriorities)
953 return event_base_priority_init(current_base, npriorities);
957 event_base_priority_init(struct event_base *base, int npriorities)
959 int i;
961 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
962 || npriorities >= EVENT_MAX_PRIORITIES)
963 return (-1);
965 if (npriorities == base->nactivequeues)
966 return (0);
968 if (base->nactivequeues) {
969 mm_free(base->activequeues);
970 base->nactivequeues = 0;
973 /* Allocate our priority queues */
974 base->activequeues = (struct event_list *)
975 mm_calloc(npriorities, sizeof(struct event_list));
976 if (base->activequeues == NULL) {
977 event_warn("%s: calloc", __func__);
978 return (-1);
980 base->nactivequeues = npriorities;
982 for (i = 0; i < base->nactivequeues; ++i) {
983 TAILQ_INIT(&base->activequeues[i]);
986 return (0);
989 /* Returns true iff we're currently watching any events. */
990 static int
991 event_haveevents(struct event_base *base)
993 /* Caller must hold th_base_lock */
994 return (base->virtual_event_count > 0 || base->event_count > 0);
997 /* "closure" function called when processing active signal events */
998 static inline void
999 event_signal_closure(struct event_base *base, struct event *ev)
1001 short ncalls;
1003 /* Allows deletes to work */
1004 ncalls = ev->ev_ncalls;
1005 ev->ev_pncalls = &ncalls;
1006 EVBASE_RELEASE_LOCK(base, th_base_lock);
1007 while (ncalls) {
1008 ncalls--;
1009 ev->ev_ncalls = ncalls;
1010 if (ncalls == 0)
1011 ev->ev_pncalls = NULL;
1012 (*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1013 #if 0
1014 /* XXXX we can't do this without a lock on the base. */
1015 if (base->event_break)
1016 return;
1017 #endif
1021 /* Common timeouts are special timeouts that are handled as queues rather than
1022 * in the minheap. This is more efficient than the minheap if we happen to
1023 * know that we're going to get several thousand timeout events all with
1024 * the same timeout value.
1026 * Since all our timeout handling code assumes timevals can be copied,
1027 * assigned, etc, we can't use "magic pointer" to encode these common
1028 * timeouts. Searching through a list to see if every timeout is common could
1029 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1030 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1031 * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
1032 * of index into the event_base's array of common timeouts.
1035 #define MICROSECONDS_MASK 0x000fffff
1036 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1037 #define COMMON_TIMEOUT_IDX_SHIFT 20
1038 #define COMMON_TIMEOUT_MASK 0xf0000000
1039 #define COMMON_TIMEOUT_MAGIC 0x50000000
1041 #define COMMON_TIMEOUT_IDX(tv) \
1042 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
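/*
 * Worked example of the encoding above (the value is chosen purely for
 * illustration): tv_usec == 0x50300123 means
 *   tv_usec & COMMON_TIMEOUT_MASK  == 0x50000000 == COMMON_TIMEOUT_MAGIC,
 *       so this timeval is a common timeout;
 *   COMMON_TIMEOUT_IDX(tv)         == 0x00300000 >> 20 == 3,
 *       i.e. the fourth entry of base->common_timeout_queues;
 *   tv_usec & MICROSECONDS_MASK    == 0x123 == 291,
 *       the actual microseconds part of the timeout.
 */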
1044 /** Return true iff 'tv' is a common timeout in 'base' */
1045 static inline int
1046 is_common_timeout(const struct timeval *tv,
1047 const struct event_base *base)
1049 int idx;
1050 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1051 return 0;
1052 idx = COMMON_TIMEOUT_IDX(tv);
1053 return idx < base->n_common_timeouts;
1056 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1057 * one is a common timeout. */
1058 static inline int
1059 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1061 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1062 (tv2->tv_usec & ~MICROSECONDS_MASK);
1065 /** Requires that 'tv' is a common timeout. Return the corresponding
1066 * common_timeout_list. */
1067 static inline struct common_timeout_list *
1068 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1070 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1073 #if 0
1074 static inline int
1075 common_timeout_ok(const struct timeval *tv,
1076 struct event_base *base)
1078 const struct timeval *expect =
1079 &get_common_timeout_list(base, tv)->duration;
1080 return tv->tv_sec == expect->tv_sec &&
1081 tv->tv_usec == expect->tv_usec;
1083 #endif
1085 /* Add the timeout for the first event in given common timeout list to the
1086 * event_base's minheap. */
1087 static void
1088 common_timeout_schedule(struct common_timeout_list *ctl,
1089 const struct timeval *now, struct event *head)
1091 struct timeval timeout = head->ev_timeout;
1092 timeout.tv_usec &= MICROSECONDS_MASK;
1093 event_add_internal(&ctl->timeout_event, &timeout, 1);
1096 /* Callback: invoked when the timeout for a common timeout queue triggers.
1097 * This means that (at least) the first event in that queue should be run,
1098 * and the timeout should be rescheduled if there are more events. */
1099 static void
1100 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1102 struct timeval now;
1103 struct common_timeout_list *ctl = arg;
1104 struct event_base *base = ctl->base;
1105 struct event *ev = NULL;
1106 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1107 gettime(base, &now);
1108 while (1) {
1109 ev = TAILQ_FIRST(&ctl->events);
1110 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1111 (ev->ev_timeout.tv_sec == now.tv_sec &&
1112 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1113 break;
1114 event_del_internal(ev);
1115 event_active_nolock(ev, EV_TIMEOUT, 1);
1117 if (ev)
1118 common_timeout_schedule(ctl, &now, ev);
1119 EVBASE_RELEASE_LOCK(base, th_base_lock);
1122 #define MAX_COMMON_TIMEOUTS 256
1124 const struct timeval *
1125 event_base_init_common_timeout(struct event_base *base,
1126 const struct timeval *duration)
1128 int i;
1129 struct timeval tv;
1130 const struct timeval *result=NULL;
1131 struct common_timeout_list *new_ctl;
1133 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1134 if (duration->tv_usec > 1000000) {
1135 memcpy(&tv, duration, sizeof(struct timeval));
1136 if (is_common_timeout(duration, base))
1137 tv.tv_usec &= MICROSECONDS_MASK;
1138 tv.tv_sec += tv.tv_usec / 1000000;
1139 tv.tv_usec %= 1000000;
1140 duration = &tv;
1142 for (i = 0; i < base->n_common_timeouts; ++i) {
1143 const struct common_timeout_list *ctl =
1144 base->common_timeout_queues[i];
1145 if (duration->tv_sec == ctl->duration.tv_sec &&
1146 duration->tv_usec ==
1147 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1148 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1149 result = &ctl->duration;
1150 goto done;
1153 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1154 event_warnx("%s: Too many common timeouts already in use; "
1155 "we only support %d per event_base", __func__,
1156 MAX_COMMON_TIMEOUTS);
1157 goto done;
1159 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1160 int n = base->n_common_timeouts < 16 ? 16 :
1161 base->n_common_timeouts*2;
1162 struct common_timeout_list **newqueues =
1163 mm_realloc(base->common_timeout_queues,
1164 n*sizeof(struct common_timeout_list *));
1165 if (!newqueues) {
1166 event_warn("%s: realloc",__func__);
1167 goto done;
1169 base->n_common_timeouts_allocated = n;
1170 base->common_timeout_queues = newqueues;
1172 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1173 if (!new_ctl) {
1174 event_warn("%s: calloc",__func__);
1175 goto done;
1177 TAILQ_INIT(&new_ctl->events);
1178 new_ctl->duration.tv_sec = duration->tv_sec;
1179 new_ctl->duration.tv_usec =
1180 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1181 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1182 evtimer_assign(&new_ctl->timeout_event, base,
1183 common_timeout_callback, new_ctl);
1184 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1185 event_priority_set(&new_ctl->timeout_event, 0);
1186 new_ctl->base = base;
1187 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1188 result = &new_ctl->duration;
1190 done:
1191 if (result)
1192 EVUTIL_ASSERT(is_common_timeout(result, base));
1194 EVBASE_RELEASE_LOCK(base, th_base_lock);
1195 return result;
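/*
 * Usage sketch (illustrative only, names invented for the example): an
 * application expecting thousands of identical timeouts asks the base for
 * a common-timeout timeval once, then passes the returned pointer to
 * event_add() instead of its own struct timeval.
 */
#include <sys/time.h>
#include <event2/event.h>

static void
example_add_with_common_timeout(struct event_base *base,
    struct event **evs, int n_events)
{
	struct timeval ten_sec = { 10, 0 };
	const struct timeval *common;
	int i;

	/* The returned timeval stays owned by 'base'; its tv_usec carries
	 * the magic/index bits described above. */
	common = event_base_init_common_timeout(base, &ten_sec);
	for (i = 0; i < n_events; ++i)
		event_add(evs[i], common ? common : &ten_sec);
}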
1198 /* Closure function invoked when we're activating a persistent event. */
1199 static inline void
1200 event_persist_closure(struct event_base *base, struct event *ev)
1202 /* reschedule the persistent event if we have a timeout. */
1203 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1204 /* If there was a timeout, we want it to run at an interval of
1205 * ev_io_timeout after the last time it was _scheduled_ for,
1206 * not ev_io_timeout after _now_. If it fired for another
1207 * reason, though, the timeout ought to start ticking _now_. */
1208 struct timeval run_at;
1209 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1210 &ev->ev_io_timeout));
1211 if (is_common_timeout(&ev->ev_timeout, base)) {
1212 ev_uint32_t usec_mask;
1213 struct timeval delay, relative_to;
1214 delay = ev->ev_io_timeout;
1215 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1216 delay.tv_usec &= MICROSECONDS_MASK;
1217 if (ev->ev_res & EV_TIMEOUT) {
1218 relative_to = ev->ev_timeout;
1219 relative_to.tv_usec &= MICROSECONDS_MASK;
1220 } else {
1221 gettime(base, &relative_to);
1223 evutil_timeradd(&relative_to, &delay, &run_at);
1224 run_at.tv_usec |= usec_mask;
1225 } else {
1226 struct timeval relative_to;
1227 if (ev->ev_res & EV_TIMEOUT) {
1228 relative_to = ev->ev_timeout;
1229 } else {
1230 gettime(base, &relative_to);
1232 evutil_timeradd(&ev->ev_io_timeout, &relative_to,
1233 &run_at);
1235 event_add_internal(ev, &run_at, 1);
1237 EVBASE_RELEASE_LOCK(base, th_base_lock);
1238 (*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
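/*
 * Usage sketch (illustrative only): the rescheduling above is what makes a
 * persistent event with a timeout behave like a periodic timer.  The
 * callback and function names are invented for the example.
 */
#include <sys/time.h>
#include <event2/event.h>

static void
example_periodic_cb(evutil_socket_t fd, short what, void *arg)
{
	/* Runs every 5 seconds, or sooner whenever fd becomes readable;
	 * EV_PERSIST means we never have to re-add the event here. */
	(void)fd; (void)what; (void)arg;
}

static struct event *
example_add_periodic(struct event_base *base, evutil_socket_t fd)
{
	struct timeval five_sec = { 5, 0 };
	struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST,
	    example_periodic_cb, NULL);

	if (ev)
		event_add(ev, &five_sec);
	return ev;
}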
1242 Helper for event_process_active to process all the events in a single queue,
1243 releasing the lock as we go. This function requires that the lock be held
1244 when it's invoked. Returns -1 if we get a signal or an event_break that
1245 means we should stop processing any active events now. Otherwise returns
1246 the number of non-internal events that we processed.
1248 static int
1249 event_process_active_single_queue(struct event_base *base,
1250 struct event_list *activeq)
1252 struct event *ev;
1253 int count = 0;
1255 EVUTIL_ASSERT(activeq != NULL);
1257 for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
1258 if (ev->ev_events & EV_PERSIST)
1259 event_queue_remove(base, ev, EVLIST_ACTIVE);
1260 else
1261 event_del_internal(ev);
1262 if (!(ev->ev_flags & EVLIST_INTERNAL))
1263 ++count;
1265 event_debug((
1266 "event_process_active: event: %p, %s%scall %p",
1268 ev->ev_res & EV_READ ? "EV_READ " : " ",
1269 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1270 ev->ev_callback));
1272 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1273 base->current_event = ev;
1274 base->current_event_waiters = 0;
1275 #endif
1277 switch (ev->ev_closure) {
1278 case EV_CLOSURE_SIGNAL:
1279 event_signal_closure(base, ev);
1280 break;
1281 case EV_CLOSURE_PERSIST:
1282 event_persist_closure(base, ev);
1283 break;
1284 default:
1285 case EV_CLOSURE_NONE:
1286 EVBASE_RELEASE_LOCK(base, th_base_lock);
1287 (*ev->ev_callback)(
1288 (int)ev->ev_fd, ev->ev_res, ev->ev_arg);
1289 break;
1292 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1293 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1294 base->current_event = NULL;
1295 if (base->current_event_waiters) {
1296 base->current_event_waiters = 0;
1297 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1299 #endif
1301 if (base->event_break)
1302 return -1;
1304 return count;
1308 Process up to MAX_DEFERRED of the deferred_cb entries in 'queue'. If
1309 *breakptr becomes set to 1, stop. Requires that we start out holding
1310 the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
1311 we process.
1313 static int
1314 event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
1316 int count = 0;
1317 struct deferred_cb *cb;
1319 #define MAX_DEFERRED 16
1320 while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
1321 cb->queued = 0;
1322 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
1323 --queue->active_count;
1324 UNLOCK_DEFERRED_QUEUE(queue);
1326 cb->cb(cb, cb->arg);
1328 LOCK_DEFERRED_QUEUE(queue);
1329 if (*breakptr)
1330 return -1;
1331 if (++count == MAX_DEFERRED)
1332 break;
1334 #undef MAX_DEFERRED
1335 return count;
1339 * Active events are stored in priority queues. Lower priorities are always
1340 * processed before higher priorities. Low priority events can starve high
1341 * priority ones.
1344 static int
1345 event_process_active(struct event_base *base)
1347 /* Caller must hold th_base_lock */
1348 struct event_list *activeq = NULL;
1349 int i, c = 0;
1351 for (i = 0; i < base->nactivequeues; ++i) {
1352 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1353 activeq = &base->activequeues[i];
1354 c = event_process_active_single_queue(base, activeq);
1355 if (c < 0)
1356 return -1;
1357 else if (c > 0)
1358 break; /* Processed a real event; do not
1359 * consider lower-priority events */
1360 /* If we get here, all of the events we processed
1361 * were internal. Continue. */
1365 event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
1366 return c;
1370 * Wait continuously for events. We exit only if no events are left.
1374 event_dispatch(void)
1376 return (event_loop(0));
1380 event_base_dispatch(struct event_base *event_base)
1382 return (event_base_loop(event_base, 0));
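/*
 * Minimal dispatch sketch (illustrative only): event_base_dispatch() wraps
 * event_base_loop(base, 0) above and returns once no events remain or the
 * loop is told to stop.  The callback and function names are invented for
 * the example.
 */
#include <sys/time.h>
#include <event2/event.h>

static void
example_one_shot_cb(evutil_socket_t fd, short what, void *arg)
{
	/* 'what' is EV_TIMEOUT here; nothing re-adds the timer, so the
	 * dispatch call below runs out of events and returns. */
	(void)fd; (void)what; (void)arg;
}

static int
example_run_one_timer(void)
{
	struct timeval one_sec = { 1, 0 };
	struct event_base *base = event_base_new();
	struct event *timer;

	if (!base)
		return -1;
	timer = evtimer_new(base, example_one_shot_cb, NULL);
	if (!timer) {
		event_base_free(base);
		return -1;
	}
	evtimer_add(timer, &one_sec);
	event_base_dispatch(base);	/* blocks until the timer has fired */
	event_free(timer);
	event_base_free(base);
	return 0;
}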
1385 const char *
1386 event_base_get_method(const struct event_base *base)
1388 EVUTIL_ASSERT(base);
1389 return (base->evsel->name);
1392 /** Callback: used to implement event_base_loopexit by telling the event_base
1393 * that it's time to exit its loop. */
1394 static void
1395 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1397 struct event_base *base = arg;
1398 base->event_gotterm = 1;
1402 event_loopexit(const struct timeval *tv)
1404 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1405 current_base, tv));
1409 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1411 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1412 event_base, tv));
1416 event_loopbreak(void)
1418 return (event_base_loopbreak(current_base));
1422 event_base_loopbreak(struct event_base *event_base)
1424 int r = 0;
1425 if (event_base == NULL)
1426 return (-1);
1428 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1429 event_base->event_break = 1;
1431 if (EVBASE_NEED_NOTIFY(event_base)) {
1432 r = evthread_notify_base(event_base);
1433 } else {
1434 r = (0);
1436 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1437 return r;
1441 event_base_got_break(struct event_base *event_base)
1443 int res;
1444 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1445 res = event_base->event_break;
1446 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1447 return res;
1451 event_base_got_exit(struct event_base *event_base)
1453 int res;
1454 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1455 res = event_base->event_gotterm;
1456 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1457 return res;
1460 /* not thread safe */
1463 event_loop(int flags)
1465 return event_base_loop(current_base, flags);
1469 event_base_loop(struct event_base *base, int flags)
1471 const struct eventop *evsel = base->evsel;
1472 struct timeval tv;
1473 struct timeval *tv_p;
1474 int res, done, retval = 0;
1476 /* Grab the lock. We will release it inside evsel.dispatch, and again
1477 * as we invoke user callbacks. */
1478 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1480 if (base->running_loop) {
1481 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1482 " can run on each event_base at once.", __func__);
1483 EVBASE_RELEASE_LOCK(base, th_base_lock);
1484 return -1;
1487 base->running_loop = 1;
1489 clear_time_cache(base);
1491 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1492 evsig_set_base(base);
1494 done = 0;
1496 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1497 base->th_owner_id = EVTHREAD_GET_ID();
1498 #endif
1500 base->event_gotterm = base->event_break = 0;
1502 while (!done) {
1503 /* Terminate the loop if we have been asked to */
1504 if (base->event_gotterm) {
1505 break;
1508 if (base->event_break) {
1509 break;
1512 timeout_correct(base, &tv);
1514 tv_p = &tv;
1515 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1516 timeout_next(base, &tv_p);
1517 } else {
1519 * if we have active events, we just poll new events
1520 * without waiting.
1522 evutil_timerclear(&tv);
1525 /* If we have no events, we just exit */
1526 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1527 event_debug(("%s: no events registered.", __func__));
1528 retval = 1;
1529 goto done;
1532 /* update last old time */
1533 gettime(base, &base->event_tv);
1535 clear_time_cache(base);
1537 res = evsel->dispatch(base, tv_p);
1539 if (res == -1) {
1540 event_debug(("%s: dispatch returned unsuccessfully.",
1541 __func__));
1542 retval = -1;
1543 goto done;
1546 update_time_cache(base);
1548 timeout_process(base);
1550 if (N_ACTIVE_CALLBACKS(base)) {
1551 int n = event_process_active(base);
1552 if ((flags & EVLOOP_ONCE)
1553 && N_ACTIVE_CALLBACKS(base) == 0
1554 && n != 0)
1555 done = 1;
1556 } else if (flags & EVLOOP_NONBLOCK)
1557 done = 1;
1559 event_debug(("%s: asked to terminate loop.", __func__));
1561 done:
1562 clear_time_cache(base);
1563 base->running_loop = 0;
1565 EVBASE_RELEASE_LOCK(base, th_base_lock);
1567 return (retval);
1570 /* Sets up an event for processing once */
1571 struct event_once {
1572 struct event ev;
1574 void (*cb)(evutil_socket_t, short, void *);
1575 void *arg;
1578 /* One-time callback to implement event_base_once: invokes the user callback,
1579 * then deletes the allocated storage */
1580 static void
1581 event_once_cb(evutil_socket_t fd, short events, void *arg)
1583 struct event_once *eonce = arg;
1585 (*eonce->cb)(fd, events, eonce->arg);
1586 event_debug_unassign(&eonce->ev);
1587 mm_free(eonce);
1590 /* not threadsafe, event scheduled once. */
1592 event_once(evutil_socket_t fd, short events,
1593 void (*callback)(evutil_socket_t, short, void *),
1594 void *arg, const struct timeval *tv)
1596 return event_base_once(current_base, fd, events, callback, arg, tv);
1599 /* Schedules an event once */
1601 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1602 void (*callback)(evutil_socket_t, short, void *),
1603 void *arg, const struct timeval *tv)
1605 struct event_once *eonce;
1606 struct timeval etv;
1607 int res = 0;
1609 /* We cannot support signals that just fire once, or persistent
1610 * events. */
1611 if (events & (EV_SIGNAL|EV_PERSIST))
1612 return (-1);
1614 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1615 return (-1);
1617 eonce->cb = callback;
1618 eonce->arg = arg;
1620 if (events == EV_TIMEOUT) {
1621 if (tv == NULL) {
1622 evutil_timerclear(&etv);
1623 tv = &etv;
1626 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1627 } else if (events & (EV_READ|EV_WRITE)) {
1628 events &= EV_READ|EV_WRITE;
1630 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1631 } else {
1632 /* Bad event combination */
1633 mm_free(eonce);
1634 return (-1);
1637 if (res == 0)
1638 res = event_add(&eonce->ev, tv);
1639 if (res != 0) {
1640 mm_free(eonce);
1641 return (res);
1644 return (0);
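/*
 * Usage sketch (illustrative only): event_base_once() above owns the
 * event_once allocation, so the caller never handles a struct event; it
 * simply schedules a callback that runs exactly once.  Names are invented
 * for the example.
 */
#include <sys/time.h>
#include <event2/event.h>

static void
example_once_cb(evutil_socket_t fd, short what, void *arg)
{
	/* Fires a single time; the wrapper storage is freed by
	 * event_once_cb() above. */
	(void)fd; (void)what; (void)arg;
}

static int
example_schedule_once(struct event_base *base)
{
	struct timeval half_sec = { 0, 500000 };

	/* fd is ignored for a pure timeout; EV_SIGNAL or EV_PERSIST would
	 * be rejected with -1, as enforced above. */
	return event_base_once(base, -1, EV_TIMEOUT,
	    example_once_cb, NULL, &half_sec);
}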
1648 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
1650 if (!base)
1651 base = current_base;
1653 _event_debug_assert_not_added(ev);
1655 ev->ev_base = base;
1657 ev->ev_callback = callback;
1658 ev->ev_arg = arg;
1659 ev->ev_fd = fd;
1660 ev->ev_events = events;
1661 ev->ev_res = 0;
1662 ev->ev_flags = EVLIST_INIT;
1663 ev->ev_ncalls = 0;
1664 ev->ev_pncalls = NULL;
1666 if (events & EV_SIGNAL) {
1667 if ((events & (EV_READ|EV_WRITE)) != 0) {
1668 event_warnx("%s: EV_SIGNAL is not compatible with "
1669 "EV_READ or EV_WRITE", __func__);
1670 return -1;
1672 ev->ev_closure = EV_CLOSURE_SIGNAL;
1673 } else {
1674 if (events & EV_PERSIST) {
1675 evutil_timerclear(&ev->ev_io_timeout);
1676 ev->ev_closure = EV_CLOSURE_PERSIST;
1677 } else {
1678 ev->ev_closure = EV_CLOSURE_NONE;
1682 min_heap_elem_init(ev);
1684 if (base != NULL) {
1685 /* by default, we put new events into the middle priority */
1686 ev->ev_pri = base->nactivequeues / 2;
1689 _event_debug_note_setup(ev);
1691 return 0;
1695 event_base_set(struct event_base *base, struct event *ev)
1697 /* Only innocent events may be assigned to a different base */
1698 if (ev->ev_flags != EVLIST_INIT)
1699 return (-1);
1701 _event_debug_assert_is_setup(ev);
1703 ev->ev_base = base;
1704 ev->ev_pri = base->nactivequeues/2;
1706 return (0);
1709 void
1710 event_set(struct event *ev, evutil_socket_t fd, short events,
1711 void (*callback)(evutil_socket_t, short, void *), void *arg)
1713 int r;
1714 r = event_assign(ev, current_base, fd, events, callback, arg);
1715 EVUTIL_ASSERT(r == 0);
1718 struct event *
1719 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
1721 struct event *ev;
1722 ev = mm_malloc(sizeof(struct event));
1723 if (ev == NULL)
1724 return (NULL);
1725 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
1726 mm_free(ev);
1727 return (NULL);
1730 return (ev);
1733 void
1734 event_free(struct event *ev)
1736 _event_debug_assert_is_setup(ev);
1738 /* make sure that this event won't be coming back to haunt us. */
1739 event_del(ev);
1740 _event_debug_note_teardown(ev);
1741 mm_free(ev);
1745 void
1746 event_debug_unassign(struct event *ev)
1748 _event_debug_assert_not_added(ev);
1749 _event_debug_note_teardown(ev);
1751 ev->ev_flags &= ~EVLIST_INIT;
1755 * Sets the priority of an event - if an event is already scheduled
1756 * changing the priority is going to fail.
1760 event_priority_set(struct event *ev, int pri)
1762 _event_debug_assert_is_setup(ev);
1764 if (ev->ev_flags & EVLIST_ACTIVE)
1765 return (-1);
1766 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
1767 return (-1);
1769 ev->ev_pri = pri;
1771 return (0);
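/*
 * Usage sketch (illustrative only): priorities must first be sized on the
 * base with event_base_priority_init(), after which individual events can
 * be moved off the default middle priority.  Lower numbers run first, as
 * described at event_process_active().  Names are invented for the
 * example; both calls fail if the base or event is already active.
 */
#include <event2/event.h>

static void
example_use_priorities(struct event_base *base,
    struct event *urgent, struct event *bulk)
{
	event_base_priority_init(base, 3);	/* priorities 0, 1 and 2 */
	event_priority_set(urgent, 0);	/* runs before everything else */
	event_priority_set(bulk, 2);	/* may be starved by 0 and 1 */
}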
1775 * Checks if a specific event is pending or scheduled.
1779 event_pending(const struct event *ev, short event, struct timeval *tv)
1781 struct timeval now, res;
1782 int flags = 0;
1784 _event_debug_assert_is_setup(ev);
1786 if (ev->ev_flags & EVLIST_INSERTED)
1787 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
1788 if (ev->ev_flags & EVLIST_ACTIVE)
1789 flags |= ev->ev_res;
1790 if (ev->ev_flags & EVLIST_TIMEOUT)
1791 flags |= EV_TIMEOUT;
1793 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
1795 /* See if there is a timeout that we should report */
1796 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
1797 struct timeval tmp = ev->ev_timeout;
1798 event_base_gettimeofday_cached(ev->ev_base, &now);
1799 tmp.tv_usec &= MICROSECONDS_MASK;
1800 evutil_timersub(&tmp, &now, &res);
1801 /* correctly remap to real time */
1802 evutil_gettimeofday(&now, NULL);
1803 evutil_timeradd(&now, &res, tv);
1806 return (flags & event);
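/*
 * Usage sketch (illustrative only): querying what an event is still
 * waiting for, and when its pending timeout (if any) will run.  The
 * helper name is invented for the example.
 */
#include <stdio.h>
#include <sys/time.h>
#include <event2/event.h>

static void
example_report_pending(const struct event *ev)
{
	struct timeval when;

	if (event_pending(ev, EV_READ | EV_WRITE | EV_SIGNAL, NULL))
		printf("still waiting on I/O or a signal\n");
	if (event_pending(ev, EV_TIMEOUT, &when))
		printf("timeout scheduled at %ld.%06ld (wall-clock time)\n",
		    (long)when.tv_sec, (long)when.tv_usec);
}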
1810 event_initialized(const struct event *ev)
1812 if (!(ev->ev_flags & EVLIST_INIT))
1813 return 0;
1815 return 1;
1818 void
1819 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
1821 _event_debug_assert_is_setup(event);
1823 if (base_out)
1824 *base_out = event->ev_base;
1825 if (fd_out)
1826 *fd_out = event->ev_fd;
1827 if (events_out)
1828 *events_out = event->ev_events;
1829 if (callback_out)
1830 *callback_out = event->ev_callback;
1831 if (arg_out)
1832 *arg_out = event->ev_arg;
1835 size_t
1836 event_get_struct_event_size(void)
1838 return sizeof(struct event);
1841 evutil_socket_t
1842 event_get_fd(const struct event *ev)
1844 _event_debug_assert_is_setup(ev);
1845 return ev->ev_fd;
1848 struct event_base *
1849 event_get_base(const struct event *ev)
1851 _event_debug_assert_is_setup(ev);
1852 return ev->ev_base;
1855 short
1856 event_get_events(const struct event *ev)
1858 _event_debug_assert_is_setup(ev);
1859 return ev->ev_events;
1862 event_callback_fn
1863 event_get_callback(const struct event *ev)
1865 _event_debug_assert_is_setup(ev);
1866 return ev->ev_callback;
1869 void *
1870 event_get_callback_arg(const struct event *ev)
1872 _event_debug_assert_is_setup(ev);
1873 return ev->ev_arg;
1877 event_add(struct event *ev, const struct timeval *tv)
1879 int res;
1881 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
1882 event_warnx("%s: event has no event_base set.", __func__);
1883 return -1;
1886 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
1888 res = event_add_internal(ev, tv, 0);
1890 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
1892 return (res);
1895 /* Helper callback: wake an event_base from another thread. This version
1896 * works by writing a byte to one end of a socketpair, so that the event_base
1897 * listening on the other end will wake up as the corresponding event
1898 * triggers */
1899 static int
1900 evthread_notify_base_default(struct event_base *base)
1902 char buf[1];
1903 int r;
1904 buf[0] = (char) 0;
1905 #ifdef WIN32
1906 r = send(base->th_notify_fd[1], buf, 1, 0);
1907 #else
1908 r = write(base->th_notify_fd[1], buf, 1);
1909 #endif
1910 return (r < 0 && errno != EAGAIN) ? -1 : 0;
1913 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
1914 /* Helper callback: wake an event_base from another thread. This version
1915 * assumes that you have a working eventfd() implementation. */
1916 static int
1917 evthread_notify_base_eventfd(struct event_base *base)
1919 ev_uint64_t msg = 1;
1920 int r;
1921 do {
1922 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
1923 } while (r < 0 && errno == EAGAIN);
1925 return (r < 0) ? -1 : 0;
1927 #endif
1929 /** Tell the thread currently running the event_loop for base (if any) that it
1930 * needs to stop waiting in its dispatch function (if it is) and process all
1931 * active events and deferred callbacks (if there are any). */
1932 static int
1933 evthread_notify_base(struct event_base *base)
1935 EVENT_BASE_ASSERT_LOCKED(base);
1936 if (!base->th_notify_fn)
1937 return -1;
1938 if (base->is_notify_pending)
1939 return 0;
1940 base->is_notify_pending = 1;
1941 return base->th_notify_fn(base);
1944 /* Implementation function to add an event. Works just like event_add,
1945 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
1946 * we treat tv as an absolute time, not as an interval to add to the current
1947 * time */
1948 static inline int
1949 event_add_internal(struct event *ev, const struct timeval *tv,
1950 int tv_is_absolute)
1952 struct event_base *base = ev->ev_base;
1953 int res = 0;
1954 int notify = 0;
1956 EVENT_BASE_ASSERT_LOCKED(base);
1957 _event_debug_assert_is_setup(ev);
1959 event_debug((
1960 "event_add: event: %p (fd %d), %s%s%scall %p",
1962 (int)ev->ev_fd,
1963 ev->ev_events & EV_READ ? "EV_READ " : " ",
1964 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
1965 tv ? "EV_TIMEOUT " : " ",
1966 ev->ev_callback));
1968 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
1971 * prepare for timeout insertion further below, if we get a
1972 * failure on any step, we should not change any state.
1974 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
1975 if (min_heap_reserve(&base->timeheap,
1976 1 + min_heap_size(&base->timeheap)) == -1)
1977 return (-1); /* ENOMEM == errno */
1980 /* If the main thread is currently executing a signal event's
1981 * callback, and we are not the main thread, then we want to wait
1982 * until the callback is done before we mess with the event, or else
1983 * we can race on ev_ncalls and ev_pncalls below. */
1984 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1985 if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
1986 && !EVBASE_IN_THREAD(base)) {
1987 ++base->current_event_waiters;
1988 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
1990 #endif
1992 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
1993 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
1994 if (ev->ev_events & (EV_READ|EV_WRITE))
1995 res = evmap_io_add(base, ev->ev_fd, ev);
1996 else if (ev->ev_events & EV_SIGNAL)
1997 res = evmap_signal_add(base, (int)ev->ev_fd, ev);
1998 if (res != -1)
1999 event_queue_insert(base, ev, EVLIST_INSERTED);
2000 if (res == 1) {
2001 /* evmap says we need to notify the main thread. */
2002 notify = 1;
2003 res = 0;
2008 * we should change the timeout state only if the previous event
2009 * addition succeeded.
2011 if (res != -1 && tv != NULL) {
2012 struct timeval now;
2013 int common_timeout;
2016 * for persistent timeout events, we remember the
2017 * timeout value and re-add the event.
2019 * If tv_is_absolute, this was already set.
2021 if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
2022 ev->ev_io_timeout = *tv;
2025 * we already reserved memory above for the case where we
2026 * are not replacing an existing timeout.
2028 if (ev->ev_flags & EVLIST_TIMEOUT) {
2029 /* XXX I believe this is needless. */
2030 if (min_heap_elt_is_top(ev))
2031 notify = 1;
2032 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2035 /* Check if it is active due to a timeout. Rescheduling
2036 * this timeout before the callback can be executed
2037 * removes it from the active list. */
2038 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2039 (ev->ev_res & EV_TIMEOUT)) {
2040 if (ev->ev_events & EV_SIGNAL) {
2041 /* See if we are just actively executing
2042 * this event in a loop
2044 if (ev->ev_ncalls && ev->ev_pncalls) {
2045 /* Abort loop */
2046 *ev->ev_pncalls = 0;
2050 event_queue_remove(base, ev, EVLIST_ACTIVE);
2053 gettime(base, &now);
2055 common_timeout = is_common_timeout(tv, base);
2056 if (tv_is_absolute) {
2057 ev->ev_timeout = *tv;
2058 } else if (common_timeout) {
2059 struct timeval tmp = *tv;
2060 tmp.tv_usec &= MICROSECONDS_MASK;
2061 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2062 ev->ev_timeout.tv_usec |=
2063 (tv->tv_usec & ~MICROSECONDS_MASK);
2064 } else {
2065 evutil_timeradd(&now, tv, &ev->ev_timeout);
2068 event_debug((
2069 "event_add: timeout in %d seconds, call %p",
2070 (int)tv->tv_sec, ev->ev_callback));
2072 event_queue_insert(base, ev, EVLIST_TIMEOUT);
2073 if (common_timeout) {
2074 struct common_timeout_list *ctl =
2075 get_common_timeout_list(base, &ev->ev_timeout);
2076 if (ev == TAILQ_FIRST(&ctl->events)) {
2077 common_timeout_schedule(ctl, &now, ev);
2079 } else {
2080 /* See if the earliest timeout is now earlier than it
2081 * was before: if so, we will need to tell the main
2082 * thread to wake up earlier than it would
2083 * otherwise. */
2084 if (min_heap_elt_is_top(ev))
2085 notify = 1;
2089 /* if we are not in the right thread, we need to wake up the loop */
2090 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2091 evthread_notify_base(base);
2093 _event_debug_note_add(ev);
2095 return (res);
2099 event_del(struct event *ev)
2101 int res;
2103 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2104 event_warnx("%s: event has no event_base set.", __func__);
2105 return -1;
2108 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2110 res = event_del_internal(ev);
2112 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2114 return (res);
2117 /* Helper for event_del: always called with th_base_lock held. */
2118 static inline int
2119 event_del_internal(struct event *ev)
2120 {
2121 struct event_base *base;
2122 int res = 0, notify = 0;
2124 event_debug(("event_del: %p (fd %d), callback %p",
2125 ev, (int)ev->ev_fd, ev->ev_callback));
2127 /* An event without a base has not been added */
2128 if (ev->ev_base == NULL)
2129 return (-1);
2131 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2133 /* If the main thread is currently executing this event's callback,
2134 * and we are not the main thread, then we want to wait until the
2135 * callback is done before we start removing the event. That way,
2136 * when this function returns, it will be safe to free the
2137 * user-supplied argument. */
2138 base = ev->ev_base;
2139 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2140 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2141 ++base->current_event_waiters;
2142 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2143 }
2144 #endif
2146 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2148 /* See if we are just active executing this event in a loop */
2149 if (ev->ev_events & EV_SIGNAL) {
2150 if (ev->ev_ncalls && ev->ev_pncalls) {
2151 /* Abort loop */
2152 *ev->ev_pncalls = 0;
2153 }
2154 }
2156 if (ev->ev_flags & EVLIST_TIMEOUT) {
2157 /* NOTE: We never need to notify the main thread because of a
2158 * deleted timeout event: all that could happen if we don't is
2159 * that the dispatch loop might wake up too early. But the
2160 * point of notifying the main thread _is_ to wake up the
2161 * dispatch loop early anyway, so we wouldn't gain anything by
2162 * doing it.
2163 */
2164 event_queue_remove(base, ev, EVLIST_TIMEOUT);
2165 }
2167 if (ev->ev_flags & EVLIST_ACTIVE)
2168 event_queue_remove(base, ev, EVLIST_ACTIVE);
2170 if (ev->ev_flags & EVLIST_INSERTED) {
2171 event_queue_remove(base, ev, EVLIST_INSERTED);
2172 if (ev->ev_events & (EV_READ|EV_WRITE))
2173 res = evmap_io_del(base, ev->ev_fd, ev);
2174 else
2175 res = evmap_signal_del(base, (int)ev->ev_fd, ev);
2176 if (res == 1) {
2177 /* evmap says we need to notify the main thread. */
2178 notify = 1;
2179 res = 0;
2180 }
2181 }
2183 /* if we are not in the right thread, we need to wake up the loop */
2184 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2185 evthread_notify_base(base);
2187 _event_debug_note_del(ev);
2189 return (res);
2190 }
2192 void
2193 event_active(struct event *ev, int res, short ncalls)
2194 {
2195 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2196 event_warnx("%s: event has no event_base set.", __func__);
2197 return;
2198 }
2200 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2202 _event_debug_assert_is_setup(ev);
2204 event_active_nolock(ev, res, ncalls);
2206 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2207 }
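/*
 * Illustrative sketch (not part of the library): event_active() lets a
 * caller mark an event active by hand, without its fd or timeout having
 * fired; the callback then runs from the event loop as usual. The names
 * below are placeholders.
 */
#if 0
static void
example_wakeup_cb(evutil_socket_t fd, short what, void *arg)
{
	/* runs once per activation, from event_base_dispatch() */
	(void)fd; (void)what; (void)arg;
}

static void
example_force_callback(struct event *ev)
{
	/* res becomes ev->ev_res; ncalls only matters for signal events */
	event_active(ev, EV_READ, 0);
}
#endif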
2210 void
2211 event_active_nolock(struct event *ev, int res, short ncalls)
2212 {
2213 struct event_base *base;
2215 event_debug(("event_active: %p (fd %d), res %d, callback %p",
2216 ev, (int)ev->ev_fd, (int)res, ev->ev_callback));
2219 /* We get different kinds of events, add them together */
2220 if (ev->ev_flags & EVLIST_ACTIVE) {
2221 ev->ev_res |= res;
2222 return;
2223 }
2225 base = ev->ev_base;
2227 EVENT_BASE_ASSERT_LOCKED(base);
2229 ev->ev_res = res;
2231 if (ev->ev_events & EV_SIGNAL) {
2232 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2233 if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
2234 ++base->current_event_waiters;
2235 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2236 }
2237 #endif
2238 ev->ev_ncalls = ncalls;
2239 ev->ev_pncalls = NULL;
2240 }
2242 event_queue_insert(base, ev, EVLIST_ACTIVE);
2244 if (EVBASE_NEED_NOTIFY(base))
2245 evthread_notify_base(base);
2246 }
2248 void
2249 event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
2250 {
2251 memset(cb, 0, sizeof(struct deferred_cb));
2252 cb->cb = fn;
2253 cb->arg = arg;
2254 }
2256 void
2257 event_deferred_cb_cancel(struct deferred_cb_queue *queue,
2258 struct deferred_cb *cb)
2259 {
2260 if (!queue) {
2261 if (current_base)
2262 queue = &current_base->defer_queue;
2263 else
2264 return;
2265 }
2267 LOCK_DEFERRED_QUEUE(queue);
2268 if (cb->queued) {
2269 TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
2270 --queue->active_count;
2271 cb->queued = 0;
2272 }
2273 UNLOCK_DEFERRED_QUEUE(queue);
2274 }
2276 void
2277 event_deferred_cb_schedule(struct deferred_cb_queue *queue,
2278 struct deferred_cb *cb)
2279 {
2280 if (!queue) {
2281 if (current_base)
2282 queue = &current_base->defer_queue;
2283 else
2284 return;
2285 }
2287 LOCK_DEFERRED_QUEUE(queue);
2288 if (!cb->queued) {
2289 cb->queued = 1;
2290 TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
2291 ++queue->active_count;
2292 if (queue->notify_fn)
2293 queue->notify_fn(queue, queue->notify_arg);
2294 }
2295 UNLOCK_DEFERRED_QUEUE(queue);
2296 }
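/*
 * Illustrative sketch (not part of the library): how the internal
 * deferred_cb API above is meant to be driven. It relies on
 * defer-internal.h, so it is only usable from inside libevent; the
 * callback signature (struct deferred_cb *, void *) is assumed from
 * that header, and the names below are placeholders.
 */
#if 0
static void
example_deferred_fn(struct deferred_cb *cb, void *arg)
{
	/* runs later, when the base drains its deferred-callback queue */
	(void)cb; (void)arg;
}

static void
example_defer(struct deferred_cb *cb, void *arg)
{
	event_deferred_cb_init(cb, example_deferred_fn, arg);
	/* a NULL queue falls back to current_base->defer_queue (see above) */
	event_deferred_cb_schedule(NULL, cb);
}
#endif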
2298 static int
2299 timeout_next(struct event_base *base, struct timeval **tv_p)
2300 {
2301 /* Caller must hold th_base_lock */
2302 struct timeval now;
2303 struct event *ev;
2304 struct timeval *tv = *tv_p;
2305 int res = 0;
2307 ev = min_heap_top(&base->timeheap);
2309 if (ev == NULL) {
2310 /* if no time-based events are active, wait for I/O */
2311 *tv_p = NULL;
2312 goto out;
2313 }
2315 if (gettime(base, &now) == -1) {
2316 res = -1;
2317 goto out;
2318 }
2320 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
2321 evutil_timerclear(tv);
2322 goto out;
2323 }
2325 evutil_timersub(&ev->ev_timeout, &now, tv);
2327 EVUTIL_ASSERT(tv->tv_sec >= 0);
2328 EVUTIL_ASSERT(tv->tv_usec >= 0);
2329 event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
2331 out:
2332 return (res);
2333 }
2335 /*
2336 * Determines if the time is running backwards by comparing the current time
2337 * against the last time we checked. Not needed when using a monotonic clock.
2338 * If time is running backwards, we adjust the firing time of every event by
2339 * the amount that time seems to have jumped.
2340 */
2341 static void
2342 timeout_correct(struct event_base *base, struct timeval *tv)
2343 {
2344 /* Caller must hold th_base_lock. */
2345 struct event **pev;
2346 unsigned int size;
2347 struct timeval off;
2348 int i;
2350 if (use_monotonic)
2351 return;
2353 /* Check if time is running backwards */
2354 gettime(base, tv);
2356 if (evutil_timercmp(tv, &base->event_tv, >=)) {
2357 base->event_tv = *tv;
2358 return;
2359 }
2361 event_debug(("%s: time is running backwards, corrected",
2362 __func__));
2363 evutil_timersub(&base->event_tv, tv, &off);
2365 /*
2366 * We can modify the key element of the node without destroying
2367 * the minheap property, because we change every element.
2368 */
2369 pev = base->timeheap.p;
2370 size = base->timeheap.n;
2371 for (; size-- > 0; ++pev) {
2372 struct timeval *ev_tv = &(**pev).ev_timeout;
2373 evutil_timersub(ev_tv, &off, ev_tv);
2374 }
2375 for (i=0; i<base->n_common_timeouts; ++i) {
2376 struct event *ev;
2377 struct common_timeout_list *ctl =
2378 base->common_timeout_queues[i];
2379 TAILQ_FOREACH(ev, &ctl->events,
2380 ev_timeout_pos.ev_next_with_common_timeout) {
2381 struct timeval *ev_tv = &ev->ev_timeout;
2382 ev_tv->tv_usec &= MICROSECONDS_MASK;
2383 evutil_timersub(ev_tv, &off, ev_tv);
2384 ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
2385 (i<<COMMON_TIMEOUT_IDX_SHIFT);
2386 }
2387 }
2389 /* Now remember what the new time turned out to be. */
2390 base->event_tv = *tv;
2391 }
2393 /* Activate every event whose timeout has elapsed. */
2394 static void
2395 timeout_process(struct event_base *base)
2396 {
2397 /* Caller must hold lock. */
2398 struct timeval now;
2399 struct event *ev;
2401 if (min_heap_empty(&base->timeheap)) {
2402 return;
2403 }
2405 gettime(base, &now);
2407 while ((ev = min_heap_top(&base->timeheap))) {
2408 if (evutil_timercmp(&ev->ev_timeout, &now, >))
2409 break;
2411 /* delete this event from the I/O queues */
2412 event_del_internal(ev);
2414 event_debug(("timeout_process: call %p",
2415 ev->ev_callback));
2416 event_active_nolock(ev, EV_TIMEOUT, 1);
2417 }
2418 }
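/*
 * Illustrative sketch (not part of the library): a plain timeout event as
 * timeout_process() sees it. evtimer_new()/evtimer_add() are the public
 * wrappers for a timeout-only event; the 2.5-second interval is just an
 * example value, and a real caller would eventually event_free() the timer.
 */
#if 0
static void
example_timer_cb(evutil_socket_t fd, short what, void *arg)
{
	/* here 'what' is EV_TIMEOUT, set by event_active_nolock() above */
	(void)fd; (void)what; (void)arg;
}

static int
example_start_timer(struct event_base *base)
{
	struct timeval tv = { 2, 500000 };	/* 2.5 seconds */
	struct event *timer = evtimer_new(base, example_timer_cb, NULL);
	if (!timer)
		return -1;
	return evtimer_add(timer, &tv);
}
#endif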
2420 /* Remove 'ev' from 'queue' (EVLIST_...) in base. */
2421 static void
2422 event_queue_remove(struct event_base *base, struct event *ev, int queue)
2423 {
2424 EVENT_BASE_ASSERT_LOCKED(base);
2426 if (!(ev->ev_flags & queue)) {
2427 event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
2428 ev, ev->ev_fd, queue);
2429 return;
2430 }
2432 if (~ev->ev_flags & EVLIST_INTERNAL)
2433 base->event_count--;
2435 ev->ev_flags &= ~queue;
2436 switch (queue) {
2437 case EVLIST_INSERTED:
2438 TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
2439 break;
2440 case EVLIST_ACTIVE:
2441 base->event_count_active--;
2442 TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
2443 ev, ev_active_next);
2444 break;
2445 case EVLIST_TIMEOUT:
2446 if (is_common_timeout(&ev->ev_timeout, base)) {
2447 struct common_timeout_list *ctl =
2448 get_common_timeout_list(base, &ev->ev_timeout);
2449 TAILQ_REMOVE(&ctl->events, ev,
2450 ev_timeout_pos.ev_next_with_common_timeout);
2451 } else {
2452 min_heap_erase(&base->timeheap, ev);
2453 }
2454 break;
2455 default:
2456 event_errx(1, "%s: unknown queue %x", __func__, queue);
2457 }
2458 }
2460 /* Add 'ev' to the common timeout list in 'ctl'. */
2461 static void
2462 insert_common_timeout_inorder(struct common_timeout_list *ctl,
2463 struct event *ev)
2464 {
2465 struct event *e;
2466 /* By all logic, we should just be able to append 'ev' to the end of
2467 * ctl->events, since the timeout on each 'ev' is set to {the common
2468 * timeout} + {the time when we add the event}, and so the events
2469 * should arrive in order of their timeouts. But just in case
2470 * there's some wacky threading issue going on, we do a search from
2471 * the end of the list to find the right insertion point.
2472 */
2473 TAILQ_FOREACH_REVERSE(e, &ctl->events,
2474 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
2475 /* This timercmp is a little sneaky, since both ev and e have
2476 * magic values in tv_usec. Fortunately, they ought to have
2477 * the _same_ magic values in tv_usec. Let's assert for that.
2479 EVUTIL_ASSERT(
2480 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
2481 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
2482 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
2483 ev_timeout_pos.ev_next_with_common_timeout);
2484 return;
2485 }
2486 }
2487 TAILQ_INSERT_HEAD(&ctl->events, ev,
2488 ev_timeout_pos.ev_next_with_common_timeout);
2489 }
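/*
 * Illustrative sketch (not part of the library): the public way to obtain a
 * timeval that event_add() treats as a common timeout, so that many events
 * sharing the same interval land on one of the per-duration lists managed by
 * the code above instead of each getting its own min-heap entry.
 */
#if 0
static int
example_add_with_common_timeout(struct event_base *base, struct event *ev)
{
	struct timeval tv = { 5, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &tv);
	if (!common)
		return -1;
	/* 'common' carries the magic bits checked by is_common_timeout() */
	return event_add(ev, common);
}
#endif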
2491 static void
2492 event_queue_insert(struct event_base *base, struct event *ev, int queue)
2493 {
2494 EVENT_BASE_ASSERT_LOCKED(base);
2496 if (ev->ev_flags & queue) {
2497 /* Double insertion is possible for active events */
2498 if (queue & EVLIST_ACTIVE)
2499 return;
2501 event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
2502 ev, ev->ev_fd, queue);
2503 return;
2504 }
2506 if (~ev->ev_flags & EVLIST_INTERNAL)
2507 base->event_count++;
2509 ev->ev_flags |= queue;
2510 switch (queue) {
2511 case EVLIST_INSERTED:
2512 TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
2513 break;
2514 case EVLIST_ACTIVE:
2515 base->event_count_active++;
2516 TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
2517 ev,ev_active_next);
2518 break;
2519 case EVLIST_TIMEOUT: {
2520 if (is_common_timeout(&ev->ev_timeout, base)) {
2521 struct common_timeout_list *ctl =
2522 get_common_timeout_list(base, &ev->ev_timeout);
2523 insert_common_timeout_inorder(ctl, ev);
2524 } else
2525 min_heap_push(&base->timeheap, ev);
2526 break;
2527 }
2528 default:
2529 event_errx(1, "%s: unknown queue %x", __func__, queue);
2530 }
2531 }
2533 /* Functions for debugging */
2535 const char *
2536 event_get_version(void)
2537 {
2538 return (_EVENT_VERSION);
2539 }
2541 ev_uint32_t
2542 event_get_version_number(void)
2543 {
2544 return (_EVENT_NUMERIC_VERSION);
2545 }
2547 /*
2548 * No thread-safe interface needed - the information should be the same
2549 * for all threads.
2550 */
2552 const char *
2553 event_get_method(void)
2554 {
2555 return (current_base->evsel->name);
2556 }
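/*
 * Illustrative sketch (not part of the library): querying the version and
 * backend at runtime. event_get_method() above reads current_base, so it is
 * only meaningful after event_init(); event_base_get_method() is the
 * per-base variant used here.
 */
#if 0
static void
example_print_build_info(struct event_base *base)
{
	printf("libevent %s (0x%08x), backend: %s\n",
	    event_get_version(),
	    (unsigned)event_get_version_number(),
	    event_base_get_method(base));
}
#endif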
2558 #ifndef _EVENT_DISABLE_MM_REPLACEMENT
2559 static void *(*_mm_malloc_fn)(size_t sz) = NULL;
2560 static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
2561 static void (*_mm_free_fn)(void *p) = NULL;
2563 void *
2564 event_mm_malloc_(size_t sz)
2565 {
2566 if (_mm_malloc_fn)
2567 return _mm_malloc_fn(sz);
2568 else
2569 return malloc(sz);
2570 }
2572 void *
2573 event_mm_calloc_(size_t count, size_t size)
2574 {
2575 if (_mm_malloc_fn) {
2576 size_t sz = count * size;
2577 void *p = _mm_malloc_fn(sz);
2578 if (p)
2579 memset(p, 0, sz);
2580 return p;
2581 } else
2582 return calloc(count, size);
2583 }
2585 char *
2586 event_mm_strdup_(const char *str)
2587 {
2588 if (_mm_malloc_fn) {
2589 size_t ln = strlen(str);
2590 void *p = _mm_malloc_fn(ln+1);
2591 if (p)
2592 memcpy(p, str, ln+1);
2593 return p;
2594 } else
2595 #ifdef WIN32
2596 return _strdup(str);
2597 #else
2598 return strdup(str);
2599 #endif
2600 }
2602 void *
2603 event_mm_realloc_(void *ptr, size_t sz)
2604 {
2605 if (_mm_realloc_fn)
2606 return _mm_realloc_fn(ptr, sz);
2607 else
2608 return realloc(ptr, sz);
2609 }
2611 void
2612 event_mm_free_(void *ptr)
2613 {
2614 if (_mm_free_fn)
2615 _mm_free_fn(ptr);
2616 else
2617 free(ptr);
2618 }
2620 void
2621 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
2622 void *(*realloc_fn)(void *ptr, size_t sz),
2623 void (*free_fn)(void *ptr))
2624 {
2625 _mm_malloc_fn = malloc_fn;
2626 _mm_realloc_fn = realloc_fn;
2627 _mm_free_fn = free_fn;
2628 }
2629 #endif
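/*
 * Illustrative sketch (not part of the library): replacing the allocator
 * used by event_mm_malloc_() and friends. The wrappers below simply count
 * malloc calls; replacements must be installed before any other libevent
 * call allocates memory.
 */
#if 0
static size_t example_alloc_count;

static void *example_malloc(size_t sz)
{
	++example_alloc_count;
	return malloc(sz);
}
static void *example_realloc(void *p, size_t sz)
{
	return realloc(p, sz);
}
static void example_free(void *p)
{
	free(p);
}

static void example_install_allocator(void)
{
	event_set_mem_functions(example_malloc, example_realloc, example_free);
}
#endif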
2631 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2632 static void
2633 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
2634 {
2635 ev_uint64_t msg;
2636 ev_ssize_t r;
2637 struct event_base *base = arg;
2639 r = read(fd, (void*) &msg, sizeof(msg));
2640 if (r<0 && errno != EAGAIN) {
2641 event_sock_warn(fd, "Error reading from eventfd");
2642 }
2643 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2644 base->is_notify_pending = 0;
2645 EVBASE_RELEASE_LOCK(base, th_base_lock);
2646 }
2647 #endif
2649 static void
2650 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
2651 {
2652 unsigned char buf[1024];
2653 struct event_base *base = arg;
2654 #ifdef WIN32
2655 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
2656 ;
2657 #else
2658 while (read(fd, (char*)buf, sizeof(buf)) > 0)
2659 ;
2660 #endif
2662 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2663 base->is_notify_pending = 0;
2664 EVBASE_RELEASE_LOCK(base, th_base_lock);
2665 }
2667 int
2668 evthread_make_base_notifiable(struct event_base *base)
2669 {
2670 void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
2671 int (*notify)(struct event_base *) = evthread_notify_base_default;
2673 /* XXXX grab the lock here? */
2674 if (!base)
2675 return -1;
2677 if (base->th_notify_fd[0] >= 0)
2678 return 0;
2680 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2681 #ifndef EFD_CLOEXEC
2682 #define EFD_CLOEXEC 0
2683 #endif
2684 base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
2685 if (base->th_notify_fd[0] >= 0) {
2686 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2687 notify = evthread_notify_base_eventfd;
2688 cb = evthread_notify_drain_eventfd;
2689 }
2690 #endif
2691 #if defined(_EVENT_HAVE_PIPE)
2692 if (base->th_notify_fd[0] < 0) {
2693 if ((base->evsel->features & EV_FEATURE_FDS)) {
2694 if (pipe(base->th_notify_fd) < 0) {
2695 event_warn("%s: pipe", __func__);
2696 } else {
2697 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2698 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2699 }
2700 }
2701 }
2702 #endif
2704 #ifdef WIN32
2705 #define LOCAL_SOCKETPAIR_AF AF_INET
2706 #else
2707 #define LOCAL_SOCKETPAIR_AF AF_UNIX
2708 #endif
2709 if (base->th_notify_fd[0] < 0) {
2710 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
2711 base->th_notify_fd) == -1) {
2712 event_sock_warn(-1, "%s: socketpair", __func__);
2713 return (-1);
2714 } else {
2715 evutil_make_socket_closeonexec(base->th_notify_fd[0]);
2716 evutil_make_socket_closeonexec(base->th_notify_fd[1]);
2717 }
2718 }
2720 evutil_make_socket_nonblocking(base->th_notify_fd[0]);
2722 base->th_notify_fn = notify;
2724 /*
2725 Making the second socket nonblocking is a bit subtle, given that we
2726 ignore any EAGAIN returns when writing to it, and you don't usually
2727 do that for a nonblocking socket. But if the kernel gives us EAGAIN,
2728 then there's no need to add any more data to the buffer, since
2729 the main thread is already either about to wake up and drain it,
2730 or woken up and in the process of draining it.
2731 */
2732 if (base->th_notify_fd[1] > 0)
2733 evutil_make_socket_nonblocking(base->th_notify_fd[1]);
2735 /* prepare an event that we can use for wakeup */
2736 event_assign(&base->th_notify, base, base->th_notify_fd[0],
2737 EV_READ|EV_PERSIST, cb, base);
2739 /* we need to mark this as internal event */
2740 base->th_notify.ev_flags |= EVLIST_INTERNAL;
2741 event_priority_set(&base->th_notify, 0);
2743 return event_add(&base->th_notify, NULL);
2744 }
2746 void
2747 event_base_dump_events(struct event_base *base, FILE *output)
2748 {
2749 struct event *e;
2750 int i;
2751 fprintf(output, "Inserted events:\n");
2752 TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
2753 fprintf(output, " %p [fd %ld]%s%s%s%s%s\n",
2754 (void*)e, (long)e->ev_fd,
2755 (e->ev_events&EV_READ)?" Read":"",
2756 (e->ev_events&EV_WRITE)?" Write":"",
2757 (e->ev_events&EV_SIGNAL)?" Signal":"",
2758 (e->ev_events&EV_TIMEOUT)?" Timeout":"",
2759 (e->ev_events&EV_PERSIST)?" Persist":"");
2760 }
2762 for (i = 0; i < base->nactivequeues; ++i) {
2763 if (TAILQ_EMPTY(&base->activequeues[i]))
2764 continue;
2765 fprintf(output, "Active events [priority %d]:\n", i);
2766 TAILQ_FOREACH(e, &base->activequeues[i], ev_active_next) {
2767 fprintf(output, " %p [fd %ld]%s%s%s%s\n",
2768 (void*)e, (long)e->ev_fd,
2769 (e->ev_res&EV_READ)?" Read active":"",
2770 (e->ev_res&EV_WRITE)?" Write active":"",
2771 (e->ev_res&EV_SIGNAL)?" Signal active":"",
2772 (e->ev_res&EV_TIMEOUT)?" Timeout active":"");
2773 }
2774 }
2775 }
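/*
 * Illustrative sketch (not part of the library): event_base_dump_events()
 * is a debugging aid; dumping the inserted and active events to stderr
 * while debugging is the typical use.
 */
#if 0
static void
example_dump(struct event_base *base)
{
	event_base_dump_events(base, stderr);
}
#endif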
2777 void
2778 event_base_add_virtual(struct event_base *base)
2779 {
2780 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2781 base->virtual_event_count++;
2782 EVBASE_RELEASE_LOCK(base, th_base_lock);
2783 }
2785 void
2786 event_base_del_virtual(struct event_base *base)
2787 {
2788 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2789 EVUTIL_ASSERT(base->virtual_event_count > 0);
2790 base->virtual_event_count--;
2791 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
2792 evthread_notify_base(base);
2793 EVBASE_RELEASE_LOCK(base, th_base_lock);
2794 }