transmission: update from 2.13 to 2.22
[tomato.git] / release / src / router / libevent / evmap.c
blob5521626cf2431137f542c94d406801fef2833328
1 /*
2 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"

#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#ifndef WIN32
#include <unistd.h>
#endif

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"
54 /** An entry for an evmap_io list: notes all the events that want to read or
55 write on a given fd, and the number of each.
57 struct evmap_io {
58 struct event_list events;
59 ev_uint16_t nread;
60 ev_uint16_t nwrite;
63 /* An entry for an evmap_signal list: notes all the events that want to know
64 when a signal triggers. */
65 struct evmap_signal {
66 struct event_list events;
/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get used.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
76 #ifdef EVMAP_USE_HT
77 struct event_map_entry {
78 HT_ENTRY(event_map_entry) map_node;
79 evutil_socket_t fd;
80 union { /* This is a union in case we need to make more things that can
81 be in the hashtable. */
82 struct evmap_io evmap_io;
83 } ent;
86 /* Helper used by the event_io_map hashtable code; tries to return a good hash
87 * of the fd in e->fd. */
88 static inline unsigned
89 hashsocket(struct event_map_entry *e)
91 /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
92 * matter. Our hashtable implementation really likes low-order bits,
93 * though, so let's do the rotate-and-add trick. */
94 unsigned h = (unsigned) e->fd;
95 h += (h >> 2) | (h << 30);
96 return h;
99 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
100 * have the same e->fd. */
101 static inline int
102 eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
104 return e1->fd == e2->fd;
/* Instantiate the declaration and implementation of the fd->evmap_io
 * hashtable, using libevent's allocator and a 0.5 maximum load factor. */
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free)
/* Set the variable 'x' to the evmap_io stored for 'slot' in hashtable 'map',
 * or to NULL if there is no entry for 'slot'.
 *
 * No semicolon after "while (0)": the caller supplies it, so the macro
 * behaves as a single statement.  (The previous trailing semicolon expanded
 * to two statements, breaking use inside an unbraced if/else.) */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_ent = HT_FIND(event_io_map, map, &_key);		\
		(x) = _ent ? &_ent->ent.type : NULL;			\
	} while (0)
/* Set 'x' to the evmap_io for 'slot' in hashtable 'map'.  If no entry exists
 * yet, allocate one (with 'fdinfo_len' extra trailing bytes for the backend's
 * per-fd data), initialize it with 'ctor', and insert it into the table. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr,			\
		    {							\
			    _ent = *ptr;				\
		    },							\
		    {							\
			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    EVUTIL_ASSERT(_ent);			\
			    _ent->fd = slot;				\
			    (ctor)(&_ent->ent.type);			\
			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
		    });							\
		(x) = &_ent->ent.type;					\
	} while (0)
138 void evmap_io_initmap(struct event_io_map *ctx)
140 HT_INIT(event_io_map, ctx);
143 void evmap_io_clear(struct event_io_map *ctx)
145 struct event_map_entry **ent, **next, *this;
146 for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
147 this = *ent;
148 next = HT_NEXT_RMV(event_io_map, ctx, ent);
149 mm_free(this);
152 #endif
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type' plus 'fdinfo_len' trailing
   bytes, and initializing the new value by calling the function 'ctor' on it.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			EVUTIL_ASSERT(ctor != NULL);			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			EVUTIL_ASSERT((map)->entries[slot] != NULL);	\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
/* Backend per-fd data lives immediately after the evmap_io structure. */
#define FDINFO_OFFSET sizeof(struct evmap_io)
/* Array flavor: an io map is laid out exactly like a signal map. */
void
evmap_io_initmap(struct event_io_map *ctx)
{
	evmap_signal_initmap(ctx);
}
/* Array flavor: clearing an io map is the same as clearing a signal map. */
void
evmap_io_clear(struct event_io_map *ctx)
{
	evmap_signal_clear(ctx);
}
192 #endif
195 /** Expand 'map' with new entries of width 'msize' until it is big enough
196 to store a value in 'slot'.
198 static int
199 evmap_make_space(struct event_signal_map *map, int slot, int msize)
201 if (map->nentries <= slot) {
202 int nentries = map->nentries ? map->nentries : 32;
203 void **tmp;
205 while (nentries <= slot)
206 nentries <<= 1;
208 tmp = (void **)mm_realloc(map->entries, nentries * msize);
209 if (tmp == NULL)
210 return (-1);
212 memset(&tmp[map->nentries], 0,
213 (nentries - map->nentries) * msize);
215 map->nentries = nentries;
216 map->entries = tmp;
219 return (0);
222 void
223 evmap_signal_initmap(struct event_signal_map *ctx)
225 ctx->nentries = 0;
226 ctx->entries = NULL;
229 void
230 evmap_signal_clear(struct event_signal_map *ctx)
232 if (ctx->entries != NULL) {
233 int i;
234 for (i = 0; i < ctx->nentries; ++i) {
235 if (ctx->entries[i] != NULL)
236 mm_free(ctx->entries[i]);
238 mm_free(ctx->entries);
239 ctx->entries = NULL;
241 ctx->nentries = 0;
245 /* code specific to file descriptors */
247 /** Constructor for struct evmap_io */
248 static void
249 evmap_io_init(struct evmap_io *entry)
251 TAILQ_INIT(&entry->events);
252 entry->nread = 0;
253 entry->nwrite = 0;
257 /* return -1 on error, 0 on success if nothing changed in the event backend,
258 * and 1 on success if something did. */
260 evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
262 const struct eventop *evsel = base->evsel;
263 struct event_io_map *io = &base->io;
264 struct evmap_io *ctx = NULL;
265 int nread, nwrite, retval = 0;
266 short res = 0, old = 0;
267 struct event *old_ev;
269 EVUTIL_ASSERT(fd == ev->ev_fd);
271 if (fd < 0)
272 return 0;
274 #ifndef EVMAP_USE_HT
275 if (fd >= io->nentries) {
276 if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
277 return (-1);
279 #endif
280 GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
281 evsel->fdinfo_len);
283 nread = ctx->nread;
284 nwrite = ctx->nwrite;
286 if (nread)
287 old |= EV_READ;
288 if (nwrite)
289 old |= EV_WRITE;
291 if (ev->ev_events & EV_READ) {
292 if (++nread == 1)
293 res |= EV_READ;
295 if (ev->ev_events & EV_WRITE) {
296 if (++nwrite == 1)
297 res |= EV_WRITE;
299 if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
300 event_warnx("Too many events reading or writing on fd %d",
301 (int)fd);
302 return -1;
304 if (EVENT_DEBUG_MODE_IS_ON() &&
305 (old_ev = TAILQ_FIRST(&ctx->events)) &&
306 (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
307 event_warnx("Tried to mix edge-triggered and non-edge-triggered"
308 " events on fd %d", (int)fd);
309 return -1;
312 if (res) {
313 void *extra = ((char*)ctx) + sizeof(struct evmap_io);
314 /* XXX(niels): we cannot mix edge-triggered and
315 * level-triggered, we should probably assert on
316 * this. */
317 if (evsel->add(base, ev->ev_fd,
318 old, (ev->ev_events & EV_ET) | res, extra) == -1)
319 return (-1);
320 retval = 1;
323 ctx->nread = (ev_uint16_t) nread;
324 ctx->nwrite = (ev_uint16_t) nwrite;
325 TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);
327 return (retval);
330 /* return -1 on error, 0 on success if nothing changed in the event backend,
331 * and 1 on success if something did. */
333 evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
335 const struct eventop *evsel = base->evsel;
336 struct event_io_map *io = &base->io;
337 struct evmap_io *ctx;
338 int nread, nwrite, retval = 0;
339 short res = 0, old = 0;
341 if (fd < 0)
342 return 0;
344 EVUTIL_ASSERT(fd == ev->ev_fd);
346 #ifndef EVMAP_USE_HT
347 if (fd >= io->nentries)
348 return (-1);
349 #endif
351 GET_IO_SLOT(ctx, io, fd, evmap_io);
353 nread = ctx->nread;
354 nwrite = ctx->nwrite;
356 if (nread)
357 old |= EV_READ;
358 if (nwrite)
359 old |= EV_WRITE;
361 if (ev->ev_events & EV_READ) {
362 if (--nread == 0)
363 res |= EV_READ;
364 EVUTIL_ASSERT(nread >= 0);
366 if (ev->ev_events & EV_WRITE) {
367 if (--nwrite == 0)
368 res |= EV_WRITE;
369 EVUTIL_ASSERT(nwrite >= 0);
372 if (res) {
373 void *extra = ((char*)ctx) + sizeof(struct evmap_io);
374 if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
375 return (-1);
376 retval = 1;
379 ctx->nread = nread;
380 ctx->nwrite = nwrite;
381 TAILQ_REMOVE(&ctx->events, ev, ev_io_next);
383 return (retval);
386 void
387 evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
389 struct event_io_map *io = &base->io;
390 struct evmap_io *ctx;
391 struct event *ev;
393 #ifndef EVMAP_USE_HT
394 EVUTIL_ASSERT(fd < io->nentries);
395 #endif
396 GET_IO_SLOT(ctx, io, fd, evmap_io);
398 EVUTIL_ASSERT(ctx);
399 TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
400 if (ev->ev_events & events)
401 event_active_nolock(ev, ev->ev_events & events, 1);
405 /* code specific to signals */
407 static void
408 evmap_signal_init(struct evmap_signal *entry)
410 TAILQ_INIT(&entry->events);
415 evmap_signal_add(struct event_base *base, int sig, struct event *ev)
417 const struct eventop *evsel = base->evsigsel;
418 struct event_signal_map *map = &base->sigmap;
419 struct evmap_signal *ctx = NULL;
421 if (sig >= map->nentries) {
422 if (evmap_make_space(
423 map, sig, sizeof(struct evmap_signal *)) == -1)
424 return (-1);
426 GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
427 base->evsigsel->fdinfo_len);
429 if (TAILQ_EMPTY(&ctx->events)) {
430 if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
431 == -1)
432 return (-1);
435 TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);
437 return (1);
441 evmap_signal_del(struct event_base *base, int sig, struct event *ev)
443 const struct eventop *evsel = base->evsigsel;
444 struct event_signal_map *map = &base->sigmap;
445 struct evmap_signal *ctx;
447 if (sig >= map->nentries)
448 return (-1);
450 GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
452 if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
453 if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
454 return (-1);
457 TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);
459 return (1);
462 void
463 evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
465 struct event_signal_map *map = &base->sigmap;
466 struct evmap_signal *ctx;
467 struct event *ev;
469 EVUTIL_ASSERT(sig < map->nentries);
470 GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
472 TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
473 event_active_nolock(ev, EV_SIGNAL, ncalls);
476 void *
477 evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
479 struct evmap_io *ctx;
480 GET_IO_SLOT(ctx, map, fd, evmap_io);
481 if (ctx)
482 return ((char*)ctx) + sizeof(struct evmap_io);
483 else
484 return NULL;
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
495 void
496 event_changelist_init(struct event_changelist *changelist)
498 changelist->changes = NULL;
499 changelist->changes_size = 0;
500 changelist->n_changes = 0;
503 /** Helper: return the changelist_fdinfo corresponding to a given change. */
504 static inline struct event_changelist_fdinfo *
505 event_change_get_fdinfo(struct event_base *base,
506 const struct event_change *change)
508 char *ptr;
509 if (change->read_change & EV_CHANGE_SIGNAL) {
510 struct evmap_signal *ctx;
511 GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
512 ptr = ((char*)ctx) + sizeof(struct evmap_signal);
513 } else {
514 struct evmap_io *ctx;
515 GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
516 ptr = ((char*)ctx) + sizeof(struct evmap_io);
518 return (void*)ptr;
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures:
 * every change points back at itself through its fdinfo, and every fdinfo
 * that claims a slot points at a change for the right fd. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void*)
		    ( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c = &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
#else
#define event_changelist_check(base)  ((void)0)
#endif
556 void
557 event_changelist_remove_all(struct event_changelist *changelist,
558 struct event_base *base)
560 int i;
562 event_changelist_check(base);
564 for (i = 0; i < changelist->n_changes; ++i) {
565 struct event_change *ch = &changelist->changes[i];
566 struct event_changelist_fdinfo *fdinfo =
567 event_change_get_fdinfo(base, ch);
568 EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
569 fdinfo->idxplus1 = 0;
572 changelist->n_changes = 0;
574 event_changelist_check(base);
577 void
578 event_changelist_freemem(struct event_changelist *changelist)
580 if (changelist->changes)
581 mm_free(changelist->changes);
582 event_changelist_init(changelist); /* zero it all out. */
585 /** Increase the size of 'changelist' to hold more changes. */
586 static int
587 event_changelist_grow(struct event_changelist *changelist)
589 int new_size;
590 struct event_change *new_changes;
591 if (changelist->changes_size < 64)
592 new_size = 64;
593 else
594 new_size = changelist->changes_size * 2;
596 new_changes = mm_realloc(changelist->changes,
597 new_size * sizeof(struct event_change));
599 if (EVUTIL_UNLIKELY(new_changes == NULL))
600 return (-1);
602 changelist->changes = new_changes;
603 changelist->changes_size = new_size;
605 return (0);
608 /** Return a pointer to the changelist entry for the file descriptor or signal
609 * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
610 * old_events field to old_events.
612 static struct event_change *
613 event_changelist_get_or_construct(struct event_changelist *changelist,
614 evutil_socket_t fd,
615 short old_events,
616 struct event_changelist_fdinfo *fdinfo)
618 struct event_change *change;
620 if (fdinfo->idxplus1 == 0) {
621 int idx;
622 EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);
624 if (changelist->n_changes == changelist->changes_size) {
625 if (event_changelist_grow(changelist) < 0)
626 return NULL;
629 idx = changelist->n_changes++;
630 change = &changelist->changes[idx];
631 fdinfo->idxplus1 = idx + 1;
633 memset(change, 0, sizeof(struct event_change));
634 change->fd = fd;
635 change->old_events = old_events;
636 } else {
637 change = &changelist->changes[fdinfo->idxplus1 - 1];
638 EVUTIL_ASSERT(change->fd == fd);
640 return change;
644 event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
645 void *p)
647 struct event_changelist *changelist = &base->changelist;
648 struct event_changelist_fdinfo *fdinfo = p;
649 struct event_change *change;
651 event_changelist_check(base);
653 change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
654 if (!change)
655 return -1;
657 /* An add replaces any previous delete, but doesn't result in a no-op,
658 * since the delete might fail (because the fd had been closed since
659 * the last add, for instance. */
661 if (events & (EV_READ|EV_SIGNAL)) {
662 change->read_change = EV_CHANGE_ADD |
663 (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
665 if (events & EV_WRITE) {
666 change->write_change = EV_CHANGE_ADD |
667 (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
670 event_changelist_check(base);
671 return (0);
675 event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
676 void *p)
678 struct event_changelist *changelist = &base->changelist;
679 struct event_changelist_fdinfo *fdinfo = p;
680 struct event_change *change;
682 event_changelist_check(base);
683 change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
684 event_changelist_check(base);
685 if (!change)
686 return -1;
688 /* A delete removes any previous add, rather than replacing it:
689 on those platforms where "add, delete, dispatch" is not the same
690 as "no-op, dispatch", we want the no-op behavior.
692 As well as checking the current operation we should also check
693 the original set of events to make sure were not ignoring
694 the case where the add operation is present on an event that
695 was already set.
697 If we have a no-op item, we could remove it it from the list
698 entirely, but really there's not much point: skipping the no-op
699 change when we do the dispatch later is far cheaper than rejuggling
700 the array now.
702 As this stands, it also lets through deletions of events that are
703 not currently set.
706 if (events & (EV_READ|EV_SIGNAL)) {
707 if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
708 (change->read_change & EV_CHANGE_ADD))
709 change->read_change = 0;
710 else
711 change->read_change = EV_CHANGE_DEL;
713 if (events & EV_WRITE) {
714 if (!(change->old_events & EV_WRITE) &&
715 (change->write_change & EV_CHANGE_ADD))
716 change->write_change = 0;
717 else
718 change->write_change = EV_CHANGE_DEL;
721 event_changelist_check(base);
722 return (0);