2 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #include "event2/event-config.h"
30 #define WIN32_LEAN_AND_MEAN
32 #undef WIN32_LEAN_AND_MEAN
34 #include <sys/types.h>
35 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
38 #include <sys/queue.h>
49 #include "event-internal.h"
50 #include "evmap-internal.h"
51 #include "mm-internal.h"
52 #include "changelist-internal.h"
54 /** An entry for an evmap_io list: notes all the events that want to read or
55 write on a given fd, and the number of each.
58 struct event_list events
;
63 /* An entry for an evmap_signal list: notes all the events that want to know
64 when a signal triggers. */
66 struct event_list events
;
69 /* On some platforms, fds start at 0 and increment by 1 as they are
70 allocated, and old numbers get used. For these platforms, we
71 implement io maps just like signal maps: as an array of pointers to
72 struct evmap_io. But on other platforms (windows), sockets are not
73 0-indexed, not necessarily consecutive, and not necessarily reused.
74 There, we use a hashtable to implement evmap_io.
77 struct event_map_entry
{
78 HT_ENTRY(event_map_entry
) map_node
;
80 union { /* This is a union in case we need to make more things that can
81 be in the hashtable. */
82 struct evmap_io evmap_io
;
86 /* Helper used by the event_io_map hashtable code; tries to return a good hash
87 * of the fd in e->fd. */
88 static inline unsigned
89 hashsocket(struct event_map_entry
*e
)
91 /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
92 * matter. Our hashtable implementation really likes low-order bits,
93 * though, so let's do the rotate-and-add trick. */
94 unsigned h
= (unsigned) e
->fd
;
95 h
+= (h
>> 2) | (h
<< 30);
99 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
100 * have the same e->fd. */
102 eqsocket(struct event_map_entry
*e1
, struct event_map_entry
*e2
)
104 return e1
->fd
== e2
->fd
;
107 HT_PROTOTYPE(event_io_map
, event_map_entry
, map_node
, hashsocket
, eqsocket
)
108 HT_GENERATE(event_io_map
, event_map_entry
, map_node
, hashsocket
, eqsocket
,
109 0.5, mm_malloc
, mm_realloc
, mm_free
)
/* Look up the evmap_io for 'slot' in the hashtable 'map'; set (x) to it, or
 * to NULL if no entry exists. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_ent = HT_FIND(event_io_map, map, &_key);		\
		(x) = _ent ? &_ent->ent.type : NULL;			\
	} while (0);

/* As GET_IO_SLOT, but construct the entry (with fdinfo_len extra bytes for
 * backend per-fd data) via 'ctor' if it is absent.  Makes the enclosing
 * function return -1 on allocation failure. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr,			\
		    {							\
			    _ent = *ptr;				\
		    },							\
		    {							\
			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(_ent == NULL))		\
				    return (-1);			\
			    _ent->fd = slot;				\
			    (ctor)(&_ent->ent.type);			\
			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
				});					\
		(x) = &_ent->ent.type;					\
	} while (0)
139 void evmap_io_initmap(struct event_io_map
*ctx
)
141 HT_INIT(event_io_map
, ctx
);
144 void evmap_io_clear(struct event_io_map
*ctx
)
146 struct event_map_entry
**ent
, **next
, *this;
147 for (ent
= HT_START(event_io_map
, ctx
); ent
; ent
= next
) {
149 next
= HT_NEXT_RMV(event_io_map
, ctx
, ent
);
152 HT_CLEAR(event_io_map
, ctx
); /* remove all storage held by the ctx. */
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.  Makes the function
   return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
/* NOTE(review): these aliases are presumably guarded by
 * #ifndef EVMAP_USE_HT in the full file -- confirm against upstream. */
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
/* Non-hashtable build: io maps are just signal maps keyed by fd. */
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
198 /** Expand 'map' with new entries of width 'msize' until it is big enough
199 to store a value in 'slot'.
202 evmap_make_space(struct event_signal_map
*map
, int slot
, int msize
)
204 if (map
->nentries
<= slot
) {
205 int nentries
= map
->nentries
? map
->nentries
: 32;
208 while (nentries
<= slot
)
211 tmp
= (void **)mm_realloc(map
->entries
, nentries
* msize
);
215 memset(&tmp
[map
->nentries
], 0,
216 (nentries
- map
->nentries
) * msize
);
218 map
->nentries
= nentries
;
226 evmap_signal_initmap(struct event_signal_map
*ctx
)
233 evmap_signal_clear(struct event_signal_map
*ctx
)
235 if (ctx
->entries
!= NULL
) {
237 for (i
= 0; i
< ctx
->nentries
; ++i
) {
238 if (ctx
->entries
[i
] != NULL
)
239 mm_free(ctx
->entries
[i
]);
241 mm_free(ctx
->entries
);
248 /* code specific to file descriptors */
250 /** Constructor for struct evmap_io */
252 evmap_io_init(struct evmap_io
*entry
)
254 TAILQ_INIT(&entry
->events
);
260 /* return -1 on error, 0 on success if nothing changed in the event backend,
261 * and 1 on success if something did. */
263 evmap_io_add(struct event_base
*base
, evutil_socket_t fd
, struct event
*ev
)
265 const struct eventop
*evsel
= base
->evsel
;
266 struct event_io_map
*io
= &base
->io
;
267 struct evmap_io
*ctx
= NULL
;
268 int nread
, nwrite
, retval
= 0;
269 short res
= 0, old
= 0;
270 struct event
*old_ev
;
272 EVUTIL_ASSERT(fd
== ev
->ev_fd
);
278 if (fd
>= io
->nentries
) {
279 if (evmap_make_space(io
, fd
, sizeof(struct evmap_io
*)) == -1)
283 GET_IO_SLOT_AND_CTOR(ctx
, io
, fd
, evmap_io
, evmap_io_init
,
287 nwrite
= ctx
->nwrite
;
294 if (ev
->ev_events
& EV_READ
) {
298 if (ev
->ev_events
& EV_WRITE
) {
302 if (EVUTIL_UNLIKELY(nread
> 0xffff || nwrite
> 0xffff)) {
303 event_warnx("Too many events reading or writing on fd %d",
307 if (EVENT_DEBUG_MODE_IS_ON() &&
308 (old_ev
= TAILQ_FIRST(&ctx
->events
)) &&
309 (old_ev
->ev_events
&EV_ET
) != (ev
->ev_events
&EV_ET
)) {
310 event_warnx("Tried to mix edge-triggered and non-edge-triggered"
311 " events on fd %d", (int)fd
);
316 void *extra
= ((char*)ctx
) + sizeof(struct evmap_io
);
317 /* XXX(niels): we cannot mix edge-triggered and
318 * level-triggered, we should probably assert on
320 if (evsel
->add(base
, ev
->ev_fd
,
321 old
, (ev
->ev_events
& EV_ET
) | res
, extra
) == -1)
326 ctx
->nread
= (ev_uint16_t
) nread
;
327 ctx
->nwrite
= (ev_uint16_t
) nwrite
;
328 TAILQ_INSERT_TAIL(&ctx
->events
, ev
, ev_io_next
);
333 /* return -1 on error, 0 on success if nothing changed in the event backend,
334 * and 1 on success if something did. */
336 evmap_io_del(struct event_base
*base
, evutil_socket_t fd
, struct event
*ev
)
338 const struct eventop
*evsel
= base
->evsel
;
339 struct event_io_map
*io
= &base
->io
;
340 struct evmap_io
*ctx
;
341 int nread
, nwrite
, retval
= 0;
342 short res
= 0, old
= 0;
347 EVUTIL_ASSERT(fd
== ev
->ev_fd
);
350 if (fd
>= io
->nentries
)
354 GET_IO_SLOT(ctx
, io
, fd
, evmap_io
);
357 nwrite
= ctx
->nwrite
;
364 if (ev
->ev_events
& EV_READ
) {
367 EVUTIL_ASSERT(nread
>= 0);
369 if (ev
->ev_events
& EV_WRITE
) {
372 EVUTIL_ASSERT(nwrite
>= 0);
376 void *extra
= ((char*)ctx
) + sizeof(struct evmap_io
);
377 if (evsel
->del(base
, ev
->ev_fd
, old
, res
, extra
) == -1)
383 ctx
->nwrite
= nwrite
;
384 TAILQ_REMOVE(&ctx
->events
, ev
, ev_io_next
);
390 evmap_io_active(struct event_base
*base
, evutil_socket_t fd
, short events
)
392 struct event_io_map
*io
= &base
->io
;
393 struct evmap_io
*ctx
;
397 EVUTIL_ASSERT(fd
< io
->nentries
);
399 GET_IO_SLOT(ctx
, io
, fd
, evmap_io
);
402 TAILQ_FOREACH(ev
, &ctx
->events
, ev_io_next
) {
403 if (ev
->ev_events
& events
)
404 event_active_nolock(ev
, ev
->ev_events
& events
, 1);
408 /* code specific to signals */
411 evmap_signal_init(struct evmap_signal
*entry
)
413 TAILQ_INIT(&entry
->events
);
418 evmap_signal_add(struct event_base
*base
, int sig
, struct event
*ev
)
420 const struct eventop
*evsel
= base
->evsigsel
;
421 struct event_signal_map
*map
= &base
->sigmap
;
422 struct evmap_signal
*ctx
= NULL
;
424 if (sig
>= map
->nentries
) {
425 if (evmap_make_space(
426 map
, sig
, sizeof(struct evmap_signal
*)) == -1)
429 GET_SIGNAL_SLOT_AND_CTOR(ctx
, map
, sig
, evmap_signal
, evmap_signal_init
,
430 base
->evsigsel
->fdinfo_len
);
432 if (TAILQ_EMPTY(&ctx
->events
)) {
433 if (evsel
->add(base
, ev
->ev_fd
, 0, EV_SIGNAL
, NULL
)
438 TAILQ_INSERT_TAIL(&ctx
->events
, ev
, ev_signal_next
);
444 evmap_signal_del(struct event_base
*base
, int sig
, struct event
*ev
)
446 const struct eventop
*evsel
= base
->evsigsel
;
447 struct event_signal_map
*map
= &base
->sigmap
;
448 struct evmap_signal
*ctx
;
450 if (sig
>= map
->nentries
)
453 GET_SIGNAL_SLOT(ctx
, map
, sig
, evmap_signal
);
455 if (TAILQ_FIRST(&ctx
->events
) == TAILQ_LAST(&ctx
->events
, event_list
)) {
456 if (evsel
->del(base
, ev
->ev_fd
, 0, EV_SIGNAL
, NULL
) == -1)
460 TAILQ_REMOVE(&ctx
->events
, ev
, ev_signal_next
);
466 evmap_signal_active(struct event_base
*base
, evutil_socket_t sig
, int ncalls
)
468 struct event_signal_map
*map
= &base
->sigmap
;
469 struct evmap_signal
*ctx
;
472 EVUTIL_ASSERT(sig
< map
->nentries
);
473 GET_SIGNAL_SLOT(ctx
, map
, sig
, evmap_signal
);
475 TAILQ_FOREACH(ev
, &ctx
->events
, ev_signal_next
)
476 event_active_nolock(ev
, EV_SIGNAL
, ncalls
);
480 evmap_io_get_fdinfo(struct event_io_map
*map
, evutil_socket_t fd
)
482 struct evmap_io
*ctx
;
483 GET_IO_SLOT(ctx
, map
, fd
, evmap_io
);
485 return ((char*)ctx
) + sizeof(struct evmap_io
);
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
499 event_changelist_init(struct event_changelist
*changelist
)
501 changelist
->changes
= NULL
;
502 changelist
->changes_size
= 0;
503 changelist
->n_changes
= 0;
506 /** Helper: return the changelist_fdinfo corresponding to a given change. */
507 static inline struct event_changelist_fdinfo
*
508 event_change_get_fdinfo(struct event_base
*base
,
509 const struct event_change
*change
)
512 if (change
->read_change
& EV_CHANGE_SIGNAL
) {
513 struct evmap_signal
*ctx
;
514 GET_SIGNAL_SLOT(ctx
, &base
->sigmap
, change
->fd
, evmap_signal
);
515 ptr
= ((char*)ctx
) + sizeof(struct evmap_signal
);
517 struct evmap_io
*ctx
;
518 GET_IO_SLOT(ctx
, &base
->io
, change
->fd
, evmap_io
);
519 ptr
= ((char*)ctx
) + sizeof(struct evmap_io
);
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Every change must point back at itself via its fdinfo. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Every fdinfo with a nonzero index must point at a change for the
	 * same fd. */
	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void *)
			( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c =
			    &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
#else
#define event_changelist_check(base) ((void)0)
#endif
560 event_changelist_remove_all(struct event_changelist
*changelist
,
561 struct event_base
*base
)
565 event_changelist_check(base
);
567 for (i
= 0; i
< changelist
->n_changes
; ++i
) {
568 struct event_change
*ch
= &changelist
->changes
[i
];
569 struct event_changelist_fdinfo
*fdinfo
=
570 event_change_get_fdinfo(base
, ch
);
571 EVUTIL_ASSERT(fdinfo
->idxplus1
== i
+ 1);
572 fdinfo
->idxplus1
= 0;
575 changelist
->n_changes
= 0;
577 event_changelist_check(base
);
581 event_changelist_freemem(struct event_changelist
*changelist
)
583 if (changelist
->changes
)
584 mm_free(changelist
->changes
);
585 event_changelist_init(changelist
); /* zero it all out. */
588 /** Increase the size of 'changelist' to hold more changes. */
590 event_changelist_grow(struct event_changelist
*changelist
)
593 struct event_change
*new_changes
;
594 if (changelist
->changes_size
< 64)
597 new_size
= changelist
->changes_size
* 2;
599 new_changes
= mm_realloc(changelist
->changes
,
600 new_size
* sizeof(struct event_change
));
602 if (EVUTIL_UNLIKELY(new_changes
== NULL
))
605 changelist
->changes
= new_changes
;
606 changelist
->changes_size
= new_size
;
611 /** Return a pointer to the changelist entry for the file descriptor or signal
612 * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
613 * old_events field to old_events.
615 static struct event_change
*
616 event_changelist_get_or_construct(struct event_changelist
*changelist
,
619 struct event_changelist_fdinfo
*fdinfo
)
621 struct event_change
*change
;
623 if (fdinfo
->idxplus1
== 0) {
625 EVUTIL_ASSERT(changelist
->n_changes
<= changelist
->changes_size
);
627 if (changelist
->n_changes
== changelist
->changes_size
) {
628 if (event_changelist_grow(changelist
) < 0)
632 idx
= changelist
->n_changes
++;
633 change
= &changelist
->changes
[idx
];
634 fdinfo
->idxplus1
= idx
+ 1;
636 memset(change
, 0, sizeof(struct event_change
));
638 change
->old_events
= old_events
;
640 change
= &changelist
->changes
[fdinfo
->idxplus1
- 1];
641 EVUTIL_ASSERT(change
->fd
== fd
);
647 event_changelist_add(struct event_base
*base
, evutil_socket_t fd
, short old
, short events
,
650 struct event_changelist
*changelist
= &base
->changelist
;
651 struct event_changelist_fdinfo
*fdinfo
= p
;
652 struct event_change
*change
;
654 event_changelist_check(base
);
656 change
= event_changelist_get_or_construct(changelist
, fd
, old
, fdinfo
);
660 /* An add replaces any previous delete, but doesn't result in a no-op,
661 * since the delete might fail (because the fd had been closed since
662 * the last add, for instance. */
664 if (events
& (EV_READ
|EV_SIGNAL
)) {
665 change
->read_change
= EV_CHANGE_ADD
|
666 (events
& (EV_ET
|EV_PERSIST
|EV_SIGNAL
));
668 if (events
& EV_WRITE
) {
669 change
->write_change
= EV_CHANGE_ADD
|
670 (events
& (EV_ET
|EV_PERSIST
|EV_SIGNAL
));
673 event_changelist_check(base
);
678 event_changelist_del(struct event_base
*base
, evutil_socket_t fd
, short old
, short events
,
681 struct event_changelist
*changelist
= &base
->changelist
;
682 struct event_changelist_fdinfo
*fdinfo
= p
;
683 struct event_change
*change
;
685 event_changelist_check(base
);
686 change
= event_changelist_get_or_construct(changelist
, fd
, old
, fdinfo
);
687 event_changelist_check(base
);
691 /* A delete removes any previous add, rather than replacing it:
692 on those platforms where "add, delete, dispatch" is not the same
693 as "no-op, dispatch", we want the no-op behavior.
695 As well as checking the current operation we should also check
696 the original set of events to make sure were not ignoring
697 the case where the add operation is present on an event that
700 If we have a no-op item, we could remove it it from the list
701 entirely, but really there's not much point: skipping the no-op
702 change when we do the dispatch later is far cheaper than rejuggling
705 As this stands, it also lets through deletions of events that are
709 if (events
& (EV_READ
|EV_SIGNAL
)) {
710 if (!(change
->old_events
& (EV_READ
| EV_SIGNAL
)) &&
711 (change
->read_change
& EV_CHANGE_ADD
))
712 change
->read_change
= 0;
714 change
->read_change
= EV_CHANGE_DEL
;
716 if (events
& EV_WRITE
) {
717 if (!(change
->old_events
& EV_WRITE
) &&
718 (change
->write_change
& EV_CHANGE_ADD
))
719 change
->write_change
= 0;
721 change
->write_change
= EV_CHANGE_DEL
;
724 event_changelist_check(base
);
729 evmap_check_integrity(struct event_base
*base
)
731 #define EVLIST_X_SIGFOUND 0x1000
732 #define EVLIST_X_IOFOUND 0x2000
736 struct event_io_map
*io
= &base
->io
;
737 struct event_signal_map
*sigmap
= &base
->sigmap
;
739 struct event_map_entry
**mapent
;
741 int nsignals
, ntimers
, nio
;
742 nsignals
= ntimers
= nio
= 0;
744 TAILQ_FOREACH(ev
, &base
->eventqueue
, ev_next
) {
745 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_INSERTED
);
746 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_INIT
);
747 ev
->ev_flags
&= ~(EVLIST_X_SIGFOUND
|EVLIST_X_IOFOUND
);
751 HT_FOREACH(mapent
, event_io_map
, io
) {
752 struct evmap_io
*ctx
= &(*mapent
)->ent
.evmap_io
;
755 for (i
= 0; i
< io
->nentries
; ++i
) {
756 struct evmap_io
*ctx
= io
->entries
[i
];
762 TAILQ_FOREACH(ev
, &ctx
->events
, ev_io_next
) {
763 EVUTIL_ASSERT(!(ev
->ev_flags
& EVLIST_X_IOFOUND
));
764 EVUTIL_ASSERT(ev
->ev_fd
== i
);
765 ev
->ev_flags
|= EVLIST_X_IOFOUND
;
770 for (i
= 0; i
< sigmap
->nentries
; ++i
) {
771 struct evmap_signal
*ctx
= sigmap
->entries
[i
];
775 TAILQ_FOREACH(ev
, &ctx
->events
, ev_signal_next
) {
776 EVUTIL_ASSERT(!(ev
->ev_flags
& EVLIST_X_SIGFOUND
));
777 EVUTIL_ASSERT(ev
->ev_fd
== i
);
778 ev
->ev_flags
|= EVLIST_X_SIGFOUND
;
783 TAILQ_FOREACH(ev
, &base
->eventqueue
, ev_next
) {
784 if (ev
->ev_events
& (EV_READ
|EV_WRITE
)) {
785 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_X_IOFOUND
);
788 if (ev
->ev_events
& EV_SIGNAL
) {
789 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_X_SIGFOUND
);
794 EVUTIL_ASSERT(nio
== 0);
795 EVUTIL_ASSERT(nsignals
== 0);
796 /* There is no "EVUTIL_ASSERT(ntimers == 0)": eventqueue is only for
797 * pending signals and io events.