MiniDLNA update: 1.0.19.1 to 1.0.20
[tomato.git] / release / src / router / libevent / evmap.c
blob5167d7abfa2dd0873d2fb87490069a211045e6ee
1 /*
2 * Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #include "event2/event-config.h"
28 #ifdef WIN32
29 #include <winsock2.h>
30 #define WIN32_LEAN_AND_MEAN
31 #include <windows.h>
32 #undef WIN32_LEAN_AND_MEAN
33 #endif
34 #include <sys/types.h>
35 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
36 #include <sys/time.h>
37 #endif
38 #include <sys/queue.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #ifndef WIN32
42 #include <unistd.h>
43 #endif
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <string.h>
#include <time.h>
49 #include "event-internal.h"
50 #include "evmap-internal.h"
51 #include "mm-internal.h"
52 #include "changelist-internal.h"
54 /** An entry for an evmap_io list: notes all the events that want to read or
55 write on a given fd, and the number of each.
57 struct evmap_io {
58 struct event_list events;
59 ev_uint16_t nread;
60 ev_uint16_t nwrite;
63 /* An entry for an evmap_signal list: notes all the events that want to know
64 when a signal triggers. */
65 struct evmap_signal {
66 struct event_list events;
69 /* On some platforms, fds start at 0 and increment by 1 as they are
70 allocated, and old numbers get used. For these platforms, we
71 implement io maps just like signal maps: as an array of pointers to
72 struct evmap_io. But on other platforms (windows), sockets are not
73 0-indexed, not necessarily consecutive, and not necessarily reused.
74 There, we use a hashtable to implement evmap_io.
76 #ifdef EVMAP_USE_HT
77 struct event_map_entry {
78 HT_ENTRY(event_map_entry) map_node;
79 evutil_socket_t fd;
80 union { /* This is a union in case we need to make more things that can
81 be in the hashtable. */
82 struct evmap_io evmap_io;
83 } ent;
86 /* Helper used by the event_io_map hashtable code; tries to return a good hash
87 * of the fd in e->fd. */
88 static inline unsigned
89 hashsocket(struct event_map_entry *e)
91 /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
92 * matter. Our hashtable implementation really likes low-order bits,
93 * though, so let's do the rotate-and-add trick. */
94 unsigned h = (unsigned) e->fd;
95 h += (h >> 2) | (h << 30);
96 return h;
99 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
100 * have the same e->fd. */
101 static inline int
102 eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
104 return e1->fd == e2->fd;
107 HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
108 HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
109 0.5, mm_malloc, mm_realloc, mm_free)
/* Set 'x' to the evmap_io entry for fd 'slot' in hashtable 'map', or to
 * NULL if 'slot' has no entry.
 *
 * Note: the do/while(0) wrapper must NOT carry its own trailing semicolon;
 * otherwise "if (c) GET_IO_SLOT(...); else ..." expands to two statements
 * and the else no longer parses.  The caller supplies the semicolon. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_ent = HT_FIND(event_io_map, map, &_key);		\
		(x) = _ent ? &_ent->ent.type : NULL;			\
	} while (0)
/* Set 'x' to the evmap_io entry for fd 'slot' in hashtable 'map',
 * allocating and constructing it (via 'ctor', with 'fdinfo_len' extra
 * trailing bytes for the backend's per-fd data) if it is not present. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr,			\
		    {							\
			    _ent = *ptr;				\
		    },							\
		    {							\
			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    EVUTIL_ASSERT(_ent);			\
			    _ent->fd = slot;				\
			    (ctor)(&_ent->ent.type);			\
			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
		    });							\
		(x) = &_ent->ent.type;					\
	} while (0)
138 void evmap_io_initmap(struct event_io_map *ctx)
140 HT_INIT(event_io_map, ctx);
143 void evmap_io_clear(struct event_io_map *ctx)
145 struct event_map_entry **ent, **next, *this;
146 for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
147 this = *ent;
148 next = HT_NEXT_RMV(event_io_map, ctx, ent);
149 mm_free(this);
151 HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
153 #endif
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not
   present, by allocating enough memory for a 'struct type' (plus
   'fdinfo_len' trailing bytes for the backend), and initializing the new
   value by calling the function 'ctor' on it. */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			EVUTIL_ASSERT(ctor != NULL);			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			EVUTIL_ASSERT((map)->entries[slot] != NULL);	\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions: fds index a plain array
   exactly like signal numbers do. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
#endif
196 /** Expand 'map' with new entries of width 'msize' until it is big enough
197 to store a value in 'slot'.
199 static int
200 evmap_make_space(struct event_signal_map *map, int slot, int msize)
202 if (map->nentries <= slot) {
203 int nentries = map->nentries ? map->nentries : 32;
204 void **tmp;
206 while (nentries <= slot)
207 nentries <<= 1;
209 tmp = (void **)mm_realloc(map->entries, nentries * msize);
210 if (tmp == NULL)
211 return (-1);
213 memset(&tmp[map->nentries], 0,
214 (nentries - map->nentries) * msize);
216 map->nentries = nentries;
217 map->entries = tmp;
220 return (0);
223 void
224 evmap_signal_initmap(struct event_signal_map *ctx)
226 ctx->nentries = 0;
227 ctx->entries = NULL;
230 void
231 evmap_signal_clear(struct event_signal_map *ctx)
233 if (ctx->entries != NULL) {
234 int i;
235 for (i = 0; i < ctx->nentries; ++i) {
236 if (ctx->entries[i] != NULL)
237 mm_free(ctx->entries[i]);
239 mm_free(ctx->entries);
240 ctx->entries = NULL;
242 ctx->nentries = 0;
246 /* code specific to file descriptors */
248 /** Constructor for struct evmap_io */
249 static void
250 evmap_io_init(struct evmap_io *entry)
252 TAILQ_INIT(&entry->events);
253 entry->nread = 0;
254 entry->nwrite = 0;
258 /* return -1 on error, 0 on success if nothing changed in the event backend,
259 * and 1 on success if something did. */
261 evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
263 const struct eventop *evsel = base->evsel;
264 struct event_io_map *io = &base->io;
265 struct evmap_io *ctx = NULL;
266 int nread, nwrite, retval = 0;
267 short res = 0, old = 0;
268 struct event *old_ev;
270 EVUTIL_ASSERT(fd == ev->ev_fd);
272 if (fd < 0)
273 return 0;
275 #ifndef EVMAP_USE_HT
276 if (fd >= io->nentries) {
277 if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
278 return (-1);
280 #endif
281 GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
282 evsel->fdinfo_len);
284 nread = ctx->nread;
285 nwrite = ctx->nwrite;
287 if (nread)
288 old |= EV_READ;
289 if (nwrite)
290 old |= EV_WRITE;
292 if (ev->ev_events & EV_READ) {
293 if (++nread == 1)
294 res |= EV_READ;
296 if (ev->ev_events & EV_WRITE) {
297 if (++nwrite == 1)
298 res |= EV_WRITE;
300 if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
301 event_warnx("Too many events reading or writing on fd %d",
302 (int)fd);
303 return -1;
305 if (EVENT_DEBUG_MODE_IS_ON() &&
306 (old_ev = TAILQ_FIRST(&ctx->events)) &&
307 (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
308 event_warnx("Tried to mix edge-triggered and non-edge-triggered"
309 " events on fd %d", (int)fd);
310 return -1;
313 if (res) {
314 void *extra = ((char*)ctx) + sizeof(struct evmap_io);
315 /* XXX(niels): we cannot mix edge-triggered and
316 * level-triggered, we should probably assert on
317 * this. */
318 if (evsel->add(base, ev->ev_fd,
319 old, (ev->ev_events & EV_ET) | res, extra) == -1)
320 return (-1);
321 retval = 1;
324 ctx->nread = (ev_uint16_t) nread;
325 ctx->nwrite = (ev_uint16_t) nwrite;
326 TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);
328 return (retval);
331 /* return -1 on error, 0 on success if nothing changed in the event backend,
332 * and 1 on success if something did. */
334 evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
336 const struct eventop *evsel = base->evsel;
337 struct event_io_map *io = &base->io;
338 struct evmap_io *ctx;
339 int nread, nwrite, retval = 0;
340 short res = 0, old = 0;
342 if (fd < 0)
343 return 0;
345 EVUTIL_ASSERT(fd == ev->ev_fd);
347 #ifndef EVMAP_USE_HT
348 if (fd >= io->nentries)
349 return (-1);
350 #endif
352 GET_IO_SLOT(ctx, io, fd, evmap_io);
354 nread = ctx->nread;
355 nwrite = ctx->nwrite;
357 if (nread)
358 old |= EV_READ;
359 if (nwrite)
360 old |= EV_WRITE;
362 if (ev->ev_events & EV_READ) {
363 if (--nread == 0)
364 res |= EV_READ;
365 EVUTIL_ASSERT(nread >= 0);
367 if (ev->ev_events & EV_WRITE) {
368 if (--nwrite == 0)
369 res |= EV_WRITE;
370 EVUTIL_ASSERT(nwrite >= 0);
373 if (res) {
374 void *extra = ((char*)ctx) + sizeof(struct evmap_io);
375 if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
376 return (-1);
377 retval = 1;
380 ctx->nread = nread;
381 ctx->nwrite = nwrite;
382 TAILQ_REMOVE(&ctx->events, ev, ev_io_next);
384 return (retval);
387 void
388 evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
390 struct event_io_map *io = &base->io;
391 struct evmap_io *ctx;
392 struct event *ev;
394 #ifndef EVMAP_USE_HT
395 EVUTIL_ASSERT(fd < io->nentries);
396 #endif
397 GET_IO_SLOT(ctx, io, fd, evmap_io);
399 EVUTIL_ASSERT(ctx);
400 TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
401 if (ev->ev_events & events)
402 event_active_nolock(ev, ev->ev_events & events, 1);
406 /* code specific to signals */
408 static void
409 evmap_signal_init(struct evmap_signal *entry)
411 TAILQ_INIT(&entry->events);
416 evmap_signal_add(struct event_base *base, int sig, struct event *ev)
418 const struct eventop *evsel = base->evsigsel;
419 struct event_signal_map *map = &base->sigmap;
420 struct evmap_signal *ctx = NULL;
422 if (sig >= map->nentries) {
423 if (evmap_make_space(
424 map, sig, sizeof(struct evmap_signal *)) == -1)
425 return (-1);
427 GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
428 base->evsigsel->fdinfo_len);
430 if (TAILQ_EMPTY(&ctx->events)) {
431 if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
432 == -1)
433 return (-1);
436 TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);
438 return (1);
442 evmap_signal_del(struct event_base *base, int sig, struct event *ev)
444 const struct eventop *evsel = base->evsigsel;
445 struct event_signal_map *map = &base->sigmap;
446 struct evmap_signal *ctx;
448 if (sig >= map->nentries)
449 return (-1);
451 GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
453 if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
454 if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
455 return (-1);
458 TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);
460 return (1);
463 void
464 evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
466 struct event_signal_map *map = &base->sigmap;
467 struct evmap_signal *ctx;
468 struct event *ev;
470 EVUTIL_ASSERT(sig < map->nentries);
471 GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
473 TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
474 event_active_nolock(ev, EV_SIGNAL, ncalls);
477 void *
478 evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
480 struct evmap_io *ctx;
481 GET_IO_SLOT(ctx, map, fd, evmap_io);
482 if (ctx)
483 return ((char*)ctx) + sizeof(struct evmap_io);
484 else
485 return NULL;
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
496 void
497 event_changelist_init(struct event_changelist *changelist)
499 changelist->changes = NULL;
500 changelist->changes_size = 0;
501 changelist->n_changes = 0;
504 /** Helper: return the changelist_fdinfo corresponding to a given change. */
505 static inline struct event_changelist_fdinfo *
506 event_change_get_fdinfo(struct event_base *base,
507 const struct event_change *change)
509 char *ptr;
510 if (change->read_change & EV_CHANGE_SIGNAL) {
511 struct evmap_signal *ctx;
512 GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
513 ptr = ((char*)ctx) + sizeof(struct evmap_signal);
514 } else {
515 struct evmap_io *ctx;
516 GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
517 ptr = ((char*)ctx) + sizeof(struct evmap_io);
519 return (void*)ptr;
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures:
 * every change points back at its fdinfo, and every fdinfo index points at
 * a change for the right fd. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void*)
		    ( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c = &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
#else
#define event_changelist_check(base) ((void)0)
#endif
557 void
558 event_changelist_remove_all(struct event_changelist *changelist,
559 struct event_base *base)
561 int i;
563 event_changelist_check(base);
565 for (i = 0; i < changelist->n_changes; ++i) {
566 struct event_change *ch = &changelist->changes[i];
567 struct event_changelist_fdinfo *fdinfo =
568 event_change_get_fdinfo(base, ch);
569 EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
570 fdinfo->idxplus1 = 0;
573 changelist->n_changes = 0;
575 event_changelist_check(base);
578 void
579 event_changelist_freemem(struct event_changelist *changelist)
581 if (changelist->changes)
582 mm_free(changelist->changes);
583 event_changelist_init(changelist); /* zero it all out. */
586 /** Increase the size of 'changelist' to hold more changes. */
587 static int
588 event_changelist_grow(struct event_changelist *changelist)
590 int new_size;
591 struct event_change *new_changes;
592 if (changelist->changes_size < 64)
593 new_size = 64;
594 else
595 new_size = changelist->changes_size * 2;
597 new_changes = mm_realloc(changelist->changes,
598 new_size * sizeof(struct event_change));
600 if (EVUTIL_UNLIKELY(new_changes == NULL))
601 return (-1);
603 changelist->changes = new_changes;
604 changelist->changes_size = new_size;
606 return (0);
609 /** Return a pointer to the changelist entry for the file descriptor or signal
610 * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
611 * old_events field to old_events.
613 static struct event_change *
614 event_changelist_get_or_construct(struct event_changelist *changelist,
615 evutil_socket_t fd,
616 short old_events,
617 struct event_changelist_fdinfo *fdinfo)
619 struct event_change *change;
621 if (fdinfo->idxplus1 == 0) {
622 int idx;
623 EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);
625 if (changelist->n_changes == changelist->changes_size) {
626 if (event_changelist_grow(changelist) < 0)
627 return NULL;
630 idx = changelist->n_changes++;
631 change = &changelist->changes[idx];
632 fdinfo->idxplus1 = idx + 1;
634 memset(change, 0, sizeof(struct event_change));
635 change->fd = fd;
636 change->old_events = old_events;
637 } else {
638 change = &changelist->changes[fdinfo->idxplus1 - 1];
639 EVUTIL_ASSERT(change->fd == fd);
641 return change;
645 event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
646 void *p)
648 struct event_changelist *changelist = &base->changelist;
649 struct event_changelist_fdinfo *fdinfo = p;
650 struct event_change *change;
652 event_changelist_check(base);
654 change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
655 if (!change)
656 return -1;
658 /* An add replaces any previous delete, but doesn't result in a no-op,
659 * since the delete might fail (because the fd had been closed since
660 * the last add, for instance. */
662 if (events & (EV_READ|EV_SIGNAL)) {
663 change->read_change = EV_CHANGE_ADD |
664 (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
666 if (events & EV_WRITE) {
667 change->write_change = EV_CHANGE_ADD |
668 (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
671 event_changelist_check(base);
672 return (0);
676 event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
677 void *p)
679 struct event_changelist *changelist = &base->changelist;
680 struct event_changelist_fdinfo *fdinfo = p;
681 struct event_change *change;
683 event_changelist_check(base);
684 change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
685 event_changelist_check(base);
686 if (!change)
687 return -1;
689 /* A delete removes any previous add, rather than replacing it:
690 on those platforms where "add, delete, dispatch" is not the same
691 as "no-op, dispatch", we want the no-op behavior.
693 As well as checking the current operation we should also check
694 the original set of events to make sure were not ignoring
695 the case where the add operation is present on an event that
696 was already set.
698 If we have a no-op item, we could remove it it from the list
699 entirely, but really there's not much point: skipping the no-op
700 change when we do the dispatch later is far cheaper than rejuggling
701 the array now.
703 As this stands, it also lets through deletions of events that are
704 not currently set.
707 if (events & (EV_READ|EV_SIGNAL)) {
708 if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
709 (change->read_change & EV_CHANGE_ADD))
710 change->read_change = 0;
711 else
712 change->read_change = EV_CHANGE_DEL;
714 if (events & EV_WRITE) {
715 if (!(change->old_events & EV_WRITE) &&
716 (change->write_change & EV_CHANGE_ADD))
717 change->write_change = 0;
718 else
719 change->write_change = EV_CHANGE_DEL;
722 event_changelist_check(base);
723 return (0);