2 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #include "event2/event-config.h"
30 #define WIN32_LEAN_AND_MEAN
32 #undef WIN32_LEAN_AND_MEAN
34 #include <sys/types.h>
35 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
38 #include <sys/queue.h>
49 #include "event-internal.h"
50 #include "evmap-internal.h"
51 #include "mm-internal.h"
52 #include "changelist-internal.h"
54 /** An entry for an evmap_io list: notes all the events that want to read or
55 write on a given fd, and the number of each.
58 struct event_list events
;
63 /* An entry for an evmap_signal list: notes all the events that want to know
64 when a signal triggers. */
66 struct event_list events
;
69 /* On some platforms, fds start at 0 and increment by 1 as they are
70 allocated, and old numbers get used. For these platforms, we
71 implement io maps just like signal maps: as an array of pointers to
72 struct evmap_io. But on other platforms (windows), sockets are not
73 0-indexed, not necessarily consecutive, and not necessarily reused.
74 There, we use a hashtable to implement evmap_io.
/* NOTE(review): this whole hashtable section is normally wrapped in
 * #ifdef EVMAP_USE_HT ... #else in upstream evmap.c; the guard lines were
 * lost from this copy and are restored here — confirm against upstream. */
#ifdef EVMAP_USE_HT

/* An entry in the event_io_map hashtable: maps one socket to its evmap_io. */
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;
	evutil_socket_t fd;
	union { /* This is a union in case we need to make more things that can
		   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};

/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter.  Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick. */
	unsigned h = (unsigned) e->fd;
	h += (h >> 2) | (h << 30);
	return h;
}

/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
	return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
	    0.5, mm_malloc, mm_realloc, mm_free)

/* Set 'x' to the evmap_io for fd 'slot' in hashtable 'map', or NULL if there
 * is no entry for 'slot'. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_ent = HT_FIND(event_io_map, map, &_key);		\
		(x) = _ent ? &_ent->ent.type : NULL;			\
	} while (0);

/* As GET_IO_SLOT, but allocate (with fdinfo_len extra trailing bytes) and
 * initialize the entry with 'ctor' if it is not yet present. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr,			\
		    {							\
			    _ent = *ptr;				\
		    },							\
		    {							\
			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    EVUTIL_ASSERT(_ent);			\
			    _ent->fd = slot;				\
			    (ctor)(&_ent->ent.type);			\
			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
				});					\
		(x) = &_ent->ent.type;					\
	} while (0)

#define FDINFO_OFFSET sizeof(struct event_map_entry)

void
evmap_io_initmap(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}

void
evmap_io_clear(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.
*/
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			EVUTIL_ASSERT(ctor != NULL);			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			EVUTIL_ASSERT((map)->entries[slot] != NULL);	\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
/* NOTE(review): restored the #ifndef EVMAP_USE_HT guard that was lost from
 * this copy — confirm against upstream evmap.c. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)

void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}

void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
#endif
196 /** Expand 'map' with new entries of width 'msize' until it is big enough
197 to store a value in 'slot'.
200 evmap_make_space(struct event_signal_map
*map
, int slot
, int msize
)
202 if (map
->nentries
<= slot
) {
203 int nentries
= map
->nentries
? map
->nentries
: 32;
206 while (nentries
<= slot
)
209 tmp
= (void **)mm_realloc(map
->entries
, nentries
* msize
);
213 memset(&tmp
[map
->nentries
], 0,
214 (nentries
- map
->nentries
) * msize
);
216 map
->nentries
= nentries
;
224 evmap_signal_initmap(struct event_signal_map
*ctx
)
231 evmap_signal_clear(struct event_signal_map
*ctx
)
233 if (ctx
->entries
!= NULL
) {
235 for (i
= 0; i
< ctx
->nentries
; ++i
) {
236 if (ctx
->entries
[i
] != NULL
)
237 mm_free(ctx
->entries
[i
]);
239 mm_free(ctx
->entries
);
246 /* code specific to file descriptors */
248 /** Constructor for struct evmap_io */
250 evmap_io_init(struct evmap_io
*entry
)
252 TAILQ_INIT(&entry
->events
);
258 /* return -1 on error, 0 on success if nothing changed in the event backend,
259 * and 1 on success if something did. */
261 evmap_io_add(struct event_base
*base
, evutil_socket_t fd
, struct event
*ev
)
263 const struct eventop
*evsel
= base
->evsel
;
264 struct event_io_map
*io
= &base
->io
;
265 struct evmap_io
*ctx
= NULL
;
266 int nread
, nwrite
, retval
= 0;
267 short res
= 0, old
= 0;
268 struct event
*old_ev
;
270 EVUTIL_ASSERT(fd
== ev
->ev_fd
);
276 if (fd
>= io
->nentries
) {
277 if (evmap_make_space(io
, fd
, sizeof(struct evmap_io
*)) == -1)
281 GET_IO_SLOT_AND_CTOR(ctx
, io
, fd
, evmap_io
, evmap_io_init
,
285 nwrite
= ctx
->nwrite
;
292 if (ev
->ev_events
& EV_READ
) {
296 if (ev
->ev_events
& EV_WRITE
) {
300 if (EVUTIL_UNLIKELY(nread
> 0xffff || nwrite
> 0xffff)) {
301 event_warnx("Too many events reading or writing on fd %d",
305 if (EVENT_DEBUG_MODE_IS_ON() &&
306 (old_ev
= TAILQ_FIRST(&ctx
->events
)) &&
307 (old_ev
->ev_events
&EV_ET
) != (ev
->ev_events
&EV_ET
)) {
308 event_warnx("Tried to mix edge-triggered and non-edge-triggered"
309 " events on fd %d", (int)fd
);
314 void *extra
= ((char*)ctx
) + sizeof(struct evmap_io
);
315 /* XXX(niels): we cannot mix edge-triggered and
316 * level-triggered, we should probably assert on
318 if (evsel
->add(base
, ev
->ev_fd
,
319 old
, (ev
->ev_events
& EV_ET
) | res
, extra
) == -1)
324 ctx
->nread
= (ev_uint16_t
) nread
;
325 ctx
->nwrite
= (ev_uint16_t
) nwrite
;
326 TAILQ_INSERT_TAIL(&ctx
->events
, ev
, ev_io_next
);
331 /* return -1 on error, 0 on success if nothing changed in the event backend,
332 * and 1 on success if something did. */
334 evmap_io_del(struct event_base
*base
, evutil_socket_t fd
, struct event
*ev
)
336 const struct eventop
*evsel
= base
->evsel
;
337 struct event_io_map
*io
= &base
->io
;
338 struct evmap_io
*ctx
;
339 int nread
, nwrite
, retval
= 0;
340 short res
= 0, old
= 0;
345 EVUTIL_ASSERT(fd
== ev
->ev_fd
);
348 if (fd
>= io
->nentries
)
352 GET_IO_SLOT(ctx
, io
, fd
, evmap_io
);
355 nwrite
= ctx
->nwrite
;
362 if (ev
->ev_events
& EV_READ
) {
365 EVUTIL_ASSERT(nread
>= 0);
367 if (ev
->ev_events
& EV_WRITE
) {
370 EVUTIL_ASSERT(nwrite
>= 0);
374 void *extra
= ((char*)ctx
) + sizeof(struct evmap_io
);
375 if (evsel
->del(base
, ev
->ev_fd
, old
, res
, extra
) == -1)
381 ctx
->nwrite
= nwrite
;
382 TAILQ_REMOVE(&ctx
->events
, ev
, ev_io_next
);
388 evmap_io_active(struct event_base
*base
, evutil_socket_t fd
, short events
)
390 struct event_io_map
*io
= &base
->io
;
391 struct evmap_io
*ctx
;
395 EVUTIL_ASSERT(fd
< io
->nentries
);
397 GET_IO_SLOT(ctx
, io
, fd
, evmap_io
);
400 TAILQ_FOREACH(ev
, &ctx
->events
, ev_io_next
) {
401 if (ev
->ev_events
& events
)
402 event_active_nolock(ev
, ev
->ev_events
& events
, 1);
406 /* code specific to signals */
409 evmap_signal_init(struct evmap_signal
*entry
)
411 TAILQ_INIT(&entry
->events
);
416 evmap_signal_add(struct event_base
*base
, int sig
, struct event
*ev
)
418 const struct eventop
*evsel
= base
->evsigsel
;
419 struct event_signal_map
*map
= &base
->sigmap
;
420 struct evmap_signal
*ctx
= NULL
;
422 if (sig
>= map
->nentries
) {
423 if (evmap_make_space(
424 map
, sig
, sizeof(struct evmap_signal
*)) == -1)
427 GET_SIGNAL_SLOT_AND_CTOR(ctx
, map
, sig
, evmap_signal
, evmap_signal_init
,
428 base
->evsigsel
->fdinfo_len
);
430 if (TAILQ_EMPTY(&ctx
->events
)) {
431 if (evsel
->add(base
, ev
->ev_fd
, 0, EV_SIGNAL
, NULL
)
436 TAILQ_INSERT_TAIL(&ctx
->events
, ev
, ev_signal_next
);
442 evmap_signal_del(struct event_base
*base
, int sig
, struct event
*ev
)
444 const struct eventop
*evsel
= base
->evsigsel
;
445 struct event_signal_map
*map
= &base
->sigmap
;
446 struct evmap_signal
*ctx
;
448 if (sig
>= map
->nentries
)
451 GET_SIGNAL_SLOT(ctx
, map
, sig
, evmap_signal
);
453 if (TAILQ_FIRST(&ctx
->events
) == TAILQ_LAST(&ctx
->events
, event_list
)) {
454 if (evsel
->del(base
, ev
->ev_fd
, 0, EV_SIGNAL
, NULL
) == -1)
458 TAILQ_REMOVE(&ctx
->events
, ev
, ev_signal_next
);
464 evmap_signal_active(struct event_base
*base
, evutil_socket_t sig
, int ncalls
)
466 struct event_signal_map
*map
= &base
->sigmap
;
467 struct evmap_signal
*ctx
;
470 EVUTIL_ASSERT(sig
< map
->nentries
);
471 GET_SIGNAL_SLOT(ctx
, map
, sig
, evmap_signal
);
473 TAILQ_FOREACH(ev
, &ctx
->events
, ev_signal_next
)
474 event_active_nolock(ev
, EV_SIGNAL
, ncalls
);
478 evmap_io_get_fdinfo(struct event_io_map
*map
, evutil_socket_t fd
)
480 struct evmap_io
*ctx
;
481 GET_IO_SLOT(ctx
, map
, fd
, evmap_io
);
483 return ((char*)ctx
) + sizeof(struct evmap_io
);
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
497 event_changelist_init(struct event_changelist
*changelist
)
499 changelist
->changes
= NULL
;
500 changelist
->changes_size
= 0;
501 changelist
->n_changes
= 0;
504 /** Helper: return the changelist_fdinfo corresponding to a given change. */
505 static inline struct event_changelist_fdinfo
*
506 event_change_get_fdinfo(struct event_base
*base
,
507 const struct event_change
*change
)
510 if (change
->read_change
& EV_CHANGE_SIGNAL
) {
511 struct evmap_signal
*ctx
;
512 GET_SIGNAL_SLOT(ctx
, &base
->sigmap
, change
->fd
, evmap_signal
);
513 ptr
= ((char*)ctx
) + sizeof(struct evmap_signal
);
515 struct evmap_io
*ctx
;
516 GET_IO_SLOT(ctx
, &base
->io
, change
->fd
, evmap_io
);
517 ptr
= ((char*)ctx
) + sizeof(struct evmap_io
);
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures. */
/* NOTE(review): reconstructed — the #else/#endif and several body lines were
 * missing from this copy; verify against upstream libevent evmap.c. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Every change points back at its own index via fdinfo. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Every fd with an fdinfo index refers to a change for that fd. */
	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void*)
		    ( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c = &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
#else
#define event_changelist_check(base)  ((void)0)
#endif
558 event_changelist_remove_all(struct event_changelist
*changelist
,
559 struct event_base
*base
)
563 event_changelist_check(base
);
565 for (i
= 0; i
< changelist
->n_changes
; ++i
) {
566 struct event_change
*ch
= &changelist
->changes
[i
];
567 struct event_changelist_fdinfo
*fdinfo
=
568 event_change_get_fdinfo(base
, ch
);
569 EVUTIL_ASSERT(fdinfo
->idxplus1
== i
+ 1);
570 fdinfo
->idxplus1
= 0;
573 changelist
->n_changes
= 0;
575 event_changelist_check(base
);
579 event_changelist_freemem(struct event_changelist
*changelist
)
581 if (changelist
->changes
)
582 mm_free(changelist
->changes
);
583 event_changelist_init(changelist
); /* zero it all out. */
586 /** Increase the size of 'changelist' to hold more changes. */
588 event_changelist_grow(struct event_changelist
*changelist
)
591 struct event_change
*new_changes
;
592 if (changelist
->changes_size
< 64)
595 new_size
= changelist
->changes_size
* 2;
597 new_changes
= mm_realloc(changelist
->changes
,
598 new_size
* sizeof(struct event_change
));
600 if (EVUTIL_UNLIKELY(new_changes
== NULL
))
603 changelist
->changes
= new_changes
;
604 changelist
->changes_size
= new_size
;
609 /** Return a pointer to the changelist entry for the file descriptor or signal
610 * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
611 * old_events field to old_events.
613 static struct event_change
*
614 event_changelist_get_or_construct(struct event_changelist
*changelist
,
617 struct event_changelist_fdinfo
*fdinfo
)
619 struct event_change
*change
;
621 if (fdinfo
->idxplus1
== 0) {
623 EVUTIL_ASSERT(changelist
->n_changes
<= changelist
->changes_size
);
625 if (changelist
->n_changes
== changelist
->changes_size
) {
626 if (event_changelist_grow(changelist
) < 0)
630 idx
= changelist
->n_changes
++;
631 change
= &changelist
->changes
[idx
];
632 fdinfo
->idxplus1
= idx
+ 1;
634 memset(change
, 0, sizeof(struct event_change
));
636 change
->old_events
= old_events
;
638 change
= &changelist
->changes
[fdinfo
->idxplus1
- 1];
639 EVUTIL_ASSERT(change
->fd
== fd
);
645 event_changelist_add(struct event_base
*base
, evutil_socket_t fd
, short old
, short events
,
648 struct event_changelist
*changelist
= &base
->changelist
;
649 struct event_changelist_fdinfo
*fdinfo
= p
;
650 struct event_change
*change
;
652 event_changelist_check(base
);
654 change
= event_changelist_get_or_construct(changelist
, fd
, old
, fdinfo
);
658 /* An add replaces any previous delete, but doesn't result in a no-op,
659 * since the delete might fail (because the fd had been closed since
660 * the last add, for instance. */
662 if (events
& (EV_READ
|EV_SIGNAL
)) {
663 change
->read_change
= EV_CHANGE_ADD
|
664 (events
& (EV_ET
|EV_PERSIST
|EV_SIGNAL
));
666 if (events
& EV_WRITE
) {
667 change
->write_change
= EV_CHANGE_ADD
|
668 (events
& (EV_ET
|EV_PERSIST
|EV_SIGNAL
));
671 event_changelist_check(base
);
676 event_changelist_del(struct event_base
*base
, evutil_socket_t fd
, short old
, short events
,
679 struct event_changelist
*changelist
= &base
->changelist
;
680 struct event_changelist_fdinfo
*fdinfo
= p
;
681 struct event_change
*change
;
683 event_changelist_check(base
);
684 change
= event_changelist_get_or_construct(changelist
, fd
, old
, fdinfo
);
685 event_changelist_check(base
);
689 /* A delete removes any previous add, rather than replacing it:
690 on those platforms where "add, delete, dispatch" is not the same
691 as "no-op, dispatch", we want the no-op behavior.
693 As well as checking the current operation we should also check
694 the original set of events to make sure were not ignoring
695 the case where the add operation is present on an event that
698 If we have a no-op item, we could remove it it from the list
699 entirely, but really there's not much point: skipping the no-op
700 change when we do the dispatch later is far cheaper than rejuggling
703 As this stands, it also lets through deletions of events that are
707 if (events
& (EV_READ
|EV_SIGNAL
)) {
708 if (!(change
->old_events
& (EV_READ
| EV_SIGNAL
)) &&
709 (change
->read_change
& EV_CHANGE_ADD
))
710 change
->read_change
= 0;
712 change
->read_change
= EV_CHANGE_DEL
;
714 if (events
& EV_WRITE
) {
715 if (!(change
->old_events
& EV_WRITE
) &&
716 (change
->write_change
& EV_CHANGE_ADD
))
717 change
->write_change
= 0;
719 change
->write_change
= EV_CHANGE_DEL
;
722 event_changelist_check(base
);