/*
 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #include "event2/event-config.h"
30 #define WIN32_LEAN_AND_MEAN
32 #undef WIN32_LEAN_AND_MEAN
34 #include <sys/types.h>
35 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
38 #include <sys/queue.h>
49 #include "event-internal.h"
50 #include "evmap-internal.h"
51 #include "mm-internal.h"
52 #include "changelist-internal.h"
54 /** An entry for an evmap_io list: notes all the events that want to read or
55 write on a given fd, and the number of each.
58 struct event_list events
;
63 /* An entry for an evmap_signal list: notes all the events that want to know
64 when a signal triggers. */
66 struct event_list events
;
69 /* On some platforms, fds start at 0 and increment by 1 as they are
70 allocated, and old numbers get used. For these platforms, we
71 implement io maps just like signal maps: as an array of pointers to
72 struct evmap_io. But on other platforms (windows), sockets are not
73 0-indexed, not necessarily consecutive, and not necessarily reused.
74 There, we use a hashtable to implement evmap_io.
77 struct event_map_entry
{
78 HT_ENTRY(event_map_entry
) map_node
;
80 union { /* This is a union in case we need to make more things that can
81 be in the hashtable. */
82 struct evmap_io evmap_io
;
86 /* Helper used by the event_io_map hashtable code; tries to return a good hash
87 * of the fd in e->fd. */
88 static inline unsigned
89 hashsocket(struct event_map_entry
*e
)
91 /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
92 * matter. Our hashtable implementation really likes low-order bits,
93 * though, so let's do the rotate-and-add trick. */
94 unsigned h
= (unsigned) e
->fd
;
95 h
+= (h
>> 2) | (h
<< 30);
99 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
100 * have the same e->fd. */
102 eqsocket(struct event_map_entry
*e1
, struct event_map_entry
*e2
)
104 return e1
->fd
== e2
->fd
;
/* Declare and generate the hash-table functions for event_io_map, keyed on
 * fd via hashsocket()/eqsocket(); memory comes from the mm_* allocators. */
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free)
/* Look up the evmap_io for fd 'slot' in hashtable 'map'; sets (x) to NULL if
 * there is no entry. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_ent = HT_FIND(event_io_map, map, &_key);		\
		(x) = _ent ? &_ent->ent.type : NULL;			\
	} while (0);

/* As GET_IO_SLOT, but allocate and construct the entry (with 'fdinfo_len'
 * extra trailing bytes for the backend's per-fd data) if it is absent. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr,			\
		    {							\
			    _ent = *ptr;				\
		    },							\
		    {							\
			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    EVUTIL_ASSERT(_ent);			\
			    _ent->fd = slot;				\
			    (ctor)(&_ent->ent.type);			\
			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
		    });							\
		(x) = &_ent->ent.type;					\
	} while (0)
138 void evmap_io_initmap(struct event_io_map
*ctx
)
140 HT_INIT(event_io_map
, ctx
);
143 void evmap_io_clear(struct event_io_map
*ctx
)
145 struct event_map_entry
**ent
, **next
, *this;
146 for (ent
= HT_START(event_io_map
, ctx
); ent
; ent
= next
) {
148 next
= HT_NEXT_RMV(event_io_map
, ctx
, ent
);
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type' (plus 'fdinfo_len' trailing
   bytes for backend per-fd data), and initializing the new value by calling
   the function 'ctor' on it. */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			EVUTIL_ASSERT(ctor != NULL);			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			EVUTIL_ASSERT((map)->entries[slot] != NULL);	\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
/* NOTE(review): in upstream libevent this is the #else branch of the
 * EVMAP_USE_HT conditional; the #else/#endif lines are missing here. */
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
195 /** Expand 'map' with new entries of width 'msize' until it is big enough
196 to store a value in 'slot'.
199 evmap_make_space(struct event_signal_map
*map
, int slot
, int msize
)
201 if (map
->nentries
<= slot
) {
202 int nentries
= map
->nentries
? map
->nentries
: 32;
205 while (nentries
<= slot
)
208 tmp
= (void **)mm_realloc(map
->entries
, nentries
* msize
);
212 memset(&tmp
[map
->nentries
], 0,
213 (nentries
- map
->nentries
) * msize
);
215 map
->nentries
= nentries
;
223 evmap_signal_initmap(struct event_signal_map
*ctx
)
230 evmap_signal_clear(struct event_signal_map
*ctx
)
232 if (ctx
->entries
!= NULL
) {
234 for (i
= 0; i
< ctx
->nentries
; ++i
) {
235 if (ctx
->entries
[i
] != NULL
)
236 mm_free(ctx
->entries
[i
]);
238 mm_free(ctx
->entries
);
245 /* code specific to file descriptors */
247 /** Constructor for struct evmap_io */
249 evmap_io_init(struct evmap_io
*entry
)
251 TAILQ_INIT(&entry
->events
);
257 /* return -1 on error, 0 on success if nothing changed in the event backend,
258 * and 1 on success if something did. */
260 evmap_io_add(struct event_base
*base
, evutil_socket_t fd
, struct event
*ev
)
262 const struct eventop
*evsel
= base
->evsel
;
263 struct event_io_map
*io
= &base
->io
;
264 struct evmap_io
*ctx
= NULL
;
265 int nread
, nwrite
, retval
= 0;
266 short res
= 0, old
= 0;
267 struct event
*old_ev
;
269 EVUTIL_ASSERT(fd
== ev
->ev_fd
);
275 if (fd
>= io
->nentries
) {
276 if (evmap_make_space(io
, fd
, sizeof(struct evmap_io
*)) == -1)
280 GET_IO_SLOT_AND_CTOR(ctx
, io
, fd
, evmap_io
, evmap_io_init
,
284 nwrite
= ctx
->nwrite
;
291 if (ev
->ev_events
& EV_READ
) {
295 if (ev
->ev_events
& EV_WRITE
) {
299 if (EVUTIL_UNLIKELY(nread
> 0xffff || nwrite
> 0xffff)) {
300 event_warnx("Too many events reading or writing on fd %d",
304 if (EVENT_DEBUG_MODE_IS_ON() &&
305 (old_ev
= TAILQ_FIRST(&ctx
->events
)) &&
306 (old_ev
->ev_events
&EV_ET
) != (ev
->ev_events
&EV_ET
)) {
307 event_warnx("Tried to mix edge-triggered and non-edge-triggered"
308 " events on fd %d", (int)fd
);
313 void *extra
= ((char*)ctx
) + sizeof(struct evmap_io
);
314 /* XXX(niels): we cannot mix edge-triggered and
315 * level-triggered, we should probably assert on
317 if (evsel
->add(base
, ev
->ev_fd
,
318 old
, (ev
->ev_events
& EV_ET
) | res
, extra
) == -1)
323 ctx
->nread
= (ev_uint16_t
) nread
;
324 ctx
->nwrite
= (ev_uint16_t
) nwrite
;
325 TAILQ_INSERT_TAIL(&ctx
->events
, ev
, ev_io_next
);
330 /* return -1 on error, 0 on success if nothing changed in the event backend,
331 * and 1 on success if something did. */
333 evmap_io_del(struct event_base
*base
, evutil_socket_t fd
, struct event
*ev
)
335 const struct eventop
*evsel
= base
->evsel
;
336 struct event_io_map
*io
= &base
->io
;
337 struct evmap_io
*ctx
;
338 int nread
, nwrite
, retval
= 0;
339 short res
= 0, old
= 0;
344 EVUTIL_ASSERT(fd
== ev
->ev_fd
);
347 if (fd
>= io
->nentries
)
351 GET_IO_SLOT(ctx
, io
, fd
, evmap_io
);
354 nwrite
= ctx
->nwrite
;
361 if (ev
->ev_events
& EV_READ
) {
364 EVUTIL_ASSERT(nread
>= 0);
366 if (ev
->ev_events
& EV_WRITE
) {
369 EVUTIL_ASSERT(nwrite
>= 0);
373 void *extra
= ((char*)ctx
) + sizeof(struct evmap_io
);
374 if (evsel
->del(base
, ev
->ev_fd
, old
, res
, extra
) == -1)
380 ctx
->nwrite
= nwrite
;
381 TAILQ_REMOVE(&ctx
->events
, ev
, ev_io_next
);
387 evmap_io_active(struct event_base
*base
, evutil_socket_t fd
, short events
)
389 struct event_io_map
*io
= &base
->io
;
390 struct evmap_io
*ctx
;
394 EVUTIL_ASSERT(fd
< io
->nentries
);
396 GET_IO_SLOT(ctx
, io
, fd
, evmap_io
);
399 TAILQ_FOREACH(ev
, &ctx
->events
, ev_io_next
) {
400 if (ev
->ev_events
& events
)
401 event_active_nolock(ev
, ev
->ev_events
& events
, 1);
405 /* code specific to signals */
408 evmap_signal_init(struct evmap_signal
*entry
)
410 TAILQ_INIT(&entry
->events
);
415 evmap_signal_add(struct event_base
*base
, int sig
, struct event
*ev
)
417 const struct eventop
*evsel
= base
->evsigsel
;
418 struct event_signal_map
*map
= &base
->sigmap
;
419 struct evmap_signal
*ctx
= NULL
;
421 if (sig
>= map
->nentries
) {
422 if (evmap_make_space(
423 map
, sig
, sizeof(struct evmap_signal
*)) == -1)
426 GET_SIGNAL_SLOT_AND_CTOR(ctx
, map
, sig
, evmap_signal
, evmap_signal_init
,
427 base
->evsigsel
->fdinfo_len
);
429 if (TAILQ_EMPTY(&ctx
->events
)) {
430 if (evsel
->add(base
, ev
->ev_fd
, 0, EV_SIGNAL
, NULL
)
435 TAILQ_INSERT_TAIL(&ctx
->events
, ev
, ev_signal_next
);
441 evmap_signal_del(struct event_base
*base
, int sig
, struct event
*ev
)
443 const struct eventop
*evsel
= base
->evsigsel
;
444 struct event_signal_map
*map
= &base
->sigmap
;
445 struct evmap_signal
*ctx
;
447 if (sig
>= map
->nentries
)
450 GET_SIGNAL_SLOT(ctx
, map
, sig
, evmap_signal
);
452 if (TAILQ_FIRST(&ctx
->events
) == TAILQ_LAST(&ctx
->events
, event_list
)) {
453 if (evsel
->del(base
, ev
->ev_fd
, 0, EV_SIGNAL
, NULL
) == -1)
457 TAILQ_REMOVE(&ctx
->events
, ev
, ev_signal_next
);
463 evmap_signal_active(struct event_base
*base
, evutil_socket_t sig
, int ncalls
)
465 struct event_signal_map
*map
= &base
->sigmap
;
466 struct evmap_signal
*ctx
;
469 EVUTIL_ASSERT(sig
< map
->nentries
);
470 GET_SIGNAL_SLOT(ctx
, map
, sig
, evmap_signal
);
472 TAILQ_FOREACH(ev
, &ctx
->events
, ev_signal_next
)
473 event_active_nolock(ev
, EV_SIGNAL
, ncalls
);
477 evmap_io_get_fdinfo(struct event_io_map
*map
, evutil_socket_t fd
)
479 struct evmap_io
*ctx
;
480 GET_IO_SLOT(ctx
, map
, fd
, evmap_io
);
482 return ((char*)ctx
) + sizeof(struct evmap_io
);
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
496 event_changelist_init(struct event_changelist
*changelist
)
498 changelist
->changes
= NULL
;
499 changelist
->changes_size
= 0;
500 changelist
->n_changes
= 0;
503 /** Helper: return the changelist_fdinfo corresponding to a given change. */
504 static inline struct event_changelist_fdinfo
*
505 event_change_get_fdinfo(struct event_base
*base
,
506 const struct event_change
*change
)
509 if (change
->read_change
& EV_CHANGE_SIGNAL
) {
510 struct evmap_signal
*ctx
;
511 GET_SIGNAL_SLOT(ctx
, &base
->sigmap
, change
->fd
, evmap_signal
);
512 ptr
= ((char*)ctx
) + sizeof(struct evmap_signal
);
514 struct evmap_io
*ctx
;
515 GET_IO_SLOT(ctx
, &base
->io
, change
->fd
, evmap_io
);
516 ptr
= ((char*)ctx
) + sizeof(struct evmap_io
);
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Every recorded change must point back at its own slot via fdinfo. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Conversely, every fdinfo with a nonzero index must name a change
	 * whose fd matches. */
	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void *)
		    ( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c = &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
#else
#define event_changelist_check(base) ((void)0)
#endif
557 event_changelist_remove_all(struct event_changelist
*changelist
,
558 struct event_base
*base
)
562 event_changelist_check(base
);
564 for (i
= 0; i
< changelist
->n_changes
; ++i
) {
565 struct event_change
*ch
= &changelist
->changes
[i
];
566 struct event_changelist_fdinfo
*fdinfo
=
567 event_change_get_fdinfo(base
, ch
);
568 EVUTIL_ASSERT(fdinfo
->idxplus1
== i
+ 1);
569 fdinfo
->idxplus1
= 0;
572 changelist
->n_changes
= 0;
574 event_changelist_check(base
);
578 event_changelist_freemem(struct event_changelist
*changelist
)
580 if (changelist
->changes
)
581 mm_free(changelist
->changes
);
582 event_changelist_init(changelist
); /* zero it all out. */
585 /** Increase the size of 'changelist' to hold more changes. */
587 event_changelist_grow(struct event_changelist
*changelist
)
590 struct event_change
*new_changes
;
591 if (changelist
->changes_size
< 64)
594 new_size
= changelist
->changes_size
* 2;
596 new_changes
= mm_realloc(changelist
->changes
,
597 new_size
* sizeof(struct event_change
));
599 if (EVUTIL_UNLIKELY(new_changes
== NULL
))
602 changelist
->changes
= new_changes
;
603 changelist
->changes_size
= new_size
;
608 /** Return a pointer to the changelist entry for the file descriptor or signal
609 * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
610 * old_events field to old_events.
612 static struct event_change
*
613 event_changelist_get_or_construct(struct event_changelist
*changelist
,
616 struct event_changelist_fdinfo
*fdinfo
)
618 struct event_change
*change
;
620 if (fdinfo
->idxplus1
== 0) {
622 EVUTIL_ASSERT(changelist
->n_changes
<= changelist
->changes_size
);
624 if (changelist
->n_changes
== changelist
->changes_size
) {
625 if (event_changelist_grow(changelist
) < 0)
629 idx
= changelist
->n_changes
++;
630 change
= &changelist
->changes
[idx
];
631 fdinfo
->idxplus1
= idx
+ 1;
633 memset(change
, 0, sizeof(struct event_change
));
635 change
->old_events
= old_events
;
637 change
= &changelist
->changes
[fdinfo
->idxplus1
- 1];
638 EVUTIL_ASSERT(change
->fd
== fd
);
644 event_changelist_add(struct event_base
*base
, evutil_socket_t fd
, short old
, short events
,
647 struct event_changelist
*changelist
= &base
->changelist
;
648 struct event_changelist_fdinfo
*fdinfo
= p
;
649 struct event_change
*change
;
651 event_changelist_check(base
);
653 change
= event_changelist_get_or_construct(changelist
, fd
, old
, fdinfo
);
657 /* An add replaces any previous delete, but doesn't result in a no-op,
658 * since the delete might fail (because the fd had been closed since
659 * the last add, for instance. */
661 if (events
& (EV_READ
|EV_SIGNAL
)) {
662 change
->read_change
= EV_CHANGE_ADD
|
663 (events
& (EV_ET
|EV_PERSIST
|EV_SIGNAL
));
665 if (events
& EV_WRITE
) {
666 change
->write_change
= EV_CHANGE_ADD
|
667 (events
& (EV_ET
|EV_PERSIST
|EV_SIGNAL
));
670 event_changelist_check(base
);
675 event_changelist_del(struct event_base
*base
, evutil_socket_t fd
, short old
, short events
,
678 struct event_changelist
*changelist
= &base
->changelist
;
679 struct event_changelist_fdinfo
*fdinfo
= p
;
680 struct event_change
*change
;
682 event_changelist_check(base
);
683 change
= event_changelist_get_or_construct(changelist
, fd
, old
, fdinfo
);
684 event_changelist_check(base
);
688 /* A delete removes any previous add, rather than replacing it:
689 on those platforms where "add, delete, dispatch" is not the same
690 as "no-op, dispatch", we want the no-op behavior.
692 As well as checking the current operation we should also check
693 the original set of events to make sure were not ignoring
694 the case where the add operation is present on an event that
697 If we have a no-op item, we could remove it it from the list
698 entirely, but really there's not much point: skipping the no-op
699 change when we do the dispatch later is far cheaper than rejuggling
702 As this stands, it also lets through deletions of events that are
706 if (events
& (EV_READ
|EV_SIGNAL
)) {
707 if (!(change
->old_events
& (EV_READ
| EV_SIGNAL
)) &&
708 (change
->read_change
& EV_CHANGE_ADD
))
709 change
->read_change
= 0;
711 change
->read_change
= EV_CHANGE_DEL
;
713 if (events
& EV_WRITE
) {
714 if (!(change
->old_events
& EV_WRITE
) &&
715 (change
->write_change
& EV_CHANGE_ADD
))
716 change
->write_change
= 0;
718 change
->write_change
= EV_CHANGE_DEL
;
721 event_changelist_check(base
);