tevent: always go through epoll_update_event()
[Samba/gbeck.git] lib/tevent/tevent_epoll.c
/*
   Unix SMB/CIFS implementation.

   main select loop and event handling - epoll implementation

   Copyright (C) Andrew Tridgell	2003-2005
   Copyright (C) Stefan Metzmacher	2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"

struct epoll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/* when using epoll this is the handle from epoll_create */
	int epoll_fd;
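
	/*
	 * pid of the process that created epoll_fd; used to detect a
	 * fork() so the epoll handle can be reopened in the child
	 */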
	pid_t pid;

	bool panic_force_replay;
	bool *panic_state;
	bool (*panic_fallback)(struct tevent_context *ev, bool replay);
};
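
/*
 * Bits kept in fde->additional_flags:
 *   HAS_EVENT    - an epoll_event is currently registered for this fde
 *   REPORT_ERROR - errors (EPOLLERR/EPOLLHUP) are reported to the handler;
 *                  only set while the caller is interested in reading
 *   GOT_ERROR    - EPOLLERR or EPOLLHUP has already been seen on this fd
 */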
#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT	(1<<0)
#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR	(1<<1)
#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR	(1<<2)

#ifdef TEST_PANIC_FALLBACK

static int epoll_create_panic_fallback(struct epoll_event_context *epoll_ev,
				       int size)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_create(size);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_create(size);
}

static int epoll_ctl_panic_fallback(struct epoll_event_context *epoll_ev,
				    int epfd, int op, int fd,
				    struct epoll_event *event)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_ctl(epfd, op, fd, event);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_ctl(epfd, op, fd, event);
}

static int epoll_wait_panic_fallback(struct epoll_event_context *epoll_ev,
				     int epfd,
				     struct epoll_event *events,
				     int maxevents,
				     int timeout)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_wait(epfd, events, maxevents, timeout);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_wait(epfd, events, maxevents, timeout);
}

#define epoll_create(_size) \
	epoll_create_panic_fallback(epoll_ev, _size)
#define epoll_ctl(_epfd, _op, _fd, _event) \
	epoll_ctl_panic_fallback(epoll_ev, _epfd, _op, _fd, _event)
#define epoll_wait(_epfd, _events, _maxevents, _timeout) \
	epoll_wait_panic_fallback(epoll_ev, _epfd, _events, _maxevents, _timeout)
#endif

/*
  called to set the panic fallback function.
*/
_PRIVATE_ bool tevent_epoll_set_panic_fallback(struct tevent_context *ev,
				bool (*panic_fallback)(struct tevent_context *ev,
						       bool replay))
{
	struct epoll_event_context *epoll_ev;

	if (ev->additional_data == NULL) {
		return false;
	}

	epoll_ev = talloc_get_type(ev->additional_data,
				   struct epoll_event_context);
	if (epoll_ev == NULL) {
		return false;
	}
	epoll_ev->panic_fallback = panic_fallback;
	return true;
}

/*
  called when an epoll call fails
*/
static void epoll_panic(struct epoll_event_context *epoll_ev,
			const char *reason, bool replay)
{
	struct tevent_context *ev = epoll_ev->ev;
	bool (*panic_fallback)(struct tevent_context *ev, bool replay);

	panic_fallback = epoll_ev->panic_fallback;

	if (epoll_ev->panic_state != NULL) {
		*epoll_ev->panic_state = true;
	}

	if (epoll_ev->panic_force_replay) {
		replay = true;
	}

	TALLOC_FREE(ev->additional_data);

	if (panic_fallback == NULL) {
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "%s (%s) replay[%u] - calling abort()\n",
			     reason, strerror(errno), (unsigned)replay);
		abort();
	}

	tevent_debug(ev, TEVENT_DEBUG_WARNING,
		     "%s (%s) replay[%u] - calling panic_fallback\n",
		     reason, strerror(errno), (unsigned)replay);

	if (!panic_fallback(ev, replay)) {
		/* Fallback failed. */
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "%s (%s) replay[%u] - calling abort()\n",
			     reason, strerror(errno), (unsigned)replay);
		abort();
	}
}

/*
  map from TEVENT_FD_* to EPOLLIN/EPOLLOUT
*/
static uint32_t epoll_map_flags(uint16_t flags)
{
	uint32_t ret = 0;
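
	/*
	 * Note: epoll reports EPOLLERR and EPOLLHUP even when they are
	 * not requested; they are added here explicitly for both read
	 * and write interest.
	 */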
	if (flags & TEVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP);
	if (flags & TEVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP);
	return ret;
}

/*
  free the epoll fd
*/
static int epoll_ctx_destructor(struct epoll_event_context *epoll_ev)
{
	close(epoll_ev->epoll_fd);
	epoll_ev->epoll_fd = -1;
	return 0;
}

/*
  init the epoll fd
*/
static int epoll_init_ctx(struct epoll_event_context *epoll_ev)
{
	epoll_ev->epoll_fd = epoll_create(64);
	if (epoll_ev->epoll_fd == -1) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
			     "Failed to create epoll handle.\n");
		return -1;
	}

	if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
			     "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
	}

	epoll_ev->pid = getpid();
	talloc_set_destructor(epoll_ev, epoll_ctx_destructor);

	return 0;
}

static void epoll_update_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde);

/*
  reopen the epoll handle when our pid changes
  see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for a
  demonstration of why this is needed
*/
static void epoll_check_reopen(struct epoll_event_context *epoll_ev)
{
	struct tevent_fd *fde;
	bool *caller_panic_state = epoll_ev->panic_state;
	bool panic_triggered = false;

	if (epoll_ev->pid == getpid()) {
		return;
	}

	close(epoll_ev->epoll_fd);
	epoll_ev->epoll_fd = epoll_create(64);
	if (epoll_ev->epoll_fd == -1) {
		epoll_panic(epoll_ev, "epoll_create() failed", false);
		return;
	}

	if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
			     "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
	}

	epoll_ev->pid = getpid();
	epoll_ev->panic_state = &panic_triggered;
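	/*
	 * Re-register every existing fd event with the new epoll
	 * instance. Clearing HAS_EVENT first makes epoll_update_event()
	 * issue EPOLL_CTL_ADD rather than EPOLL_CTL_MOD.
	 */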
	for (fde=epoll_ev->ev->fd_events;fde;fde=fde->next) {
		fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
		epoll_update_event(epoll_ev, fde);
	}
	if (panic_triggered) {
		if (caller_panic_state != NULL) {
			*caller_panic_state = true;
		}
		return;
	}
	epoll_ev->panic_state = NULL;
}

/*
  add the epoll event to the given fd_event
*/
static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	struct epoll_event event;
	int ret;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	/* if we don't want events yet, don't add an epoll_event */
	if (fde->flags == 0) return;

	ZERO_STRUCT(event);
	event.events = epoll_map_flags(fde->flags);
	event.data.ptr = fde;
	ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event);
	if (ret != 0) {
		epoll_panic(epoll_ev, "EPOLL_CTL_ADD failed", false);
		return;
	}
	fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;

	/* we only tell the event handler about errors if the caller wants to read */
	if (fde->flags & TEVENT_FD_READ) {
		fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
	}
}

/*
  delete the epoll event for the given fd_event
*/
static void epoll_del_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	struct epoll_event event;
	int ret;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	/* if there's no epoll_event, we don't need to delete it */
	if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) return;

	ZERO_STRUCT(event);
	ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event);
	if (ret != 0 && errno == ENOENT) {
		/*
		 * This can happen after an epoll_check_reopen
		 * within epoll_event_fd_destructor.
		 */
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_TRACE,
			     "EPOLL_CTL_DEL ignoring ENOENT for fd[%d]\n",
			     fde->fd);
	} else if (ret != 0) {
		epoll_panic(epoll_ev, "EPOLL_CTL_DEL failed", false);
		return;
	}
	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
}

/*
  change the epoll event to the given fd_event
*/
static void epoll_mod_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	struct epoll_event event;
	int ret;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	ZERO_STRUCT(event);
	event.events = epoll_map_flags(fde->flags);
	event.data.ptr = fde;
	ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event);
	if (ret != 0) {
		epoll_panic(epoll_ev, "EPOLL_CTL_MOD failed", false);
		return;
	}

	/* we only tell the event handler about errors if the caller wants to read */
	if (fde->flags & TEVENT_FD_READ) {
		fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
	}
}

static void epoll_update_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
	bool want_read = (fde->flags & TEVENT_FD_READ);
	bool want_write = (fde->flags & TEVENT_FD_WRITE);

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
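
	/*
	 * Decide between EPOLL_CTL_ADD, EPOLL_CTL_MOD and EPOLL_CTL_DEL
	 * based on whether an epoll_event is currently registered and on
	 * the flags the caller still wants. A write-only fde that has
	 * already seen an error keeps no registration, so it does not
	 * spin on EPOLLERR/EPOLLHUP that would never be reported to it.
	 */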
	/* there's already an event */
	if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
		if (want_read || (want_write && !got_error)) {
			epoll_mod_event(epoll_ev, fde);
			return;
		}
		/*
		 * if we want to match the select behavior, we need to remove the epoll_event
		 * when the caller isn't interested in events.
		 *
		 * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
		 */
		epoll_del_event(epoll_ev, fde);
		return;
	}

	/* there's no epoll_event attached to the fde */
	if (want_read || (want_write && !got_error)) {
		epoll_add_event(epoll_ev, fde);
		return;
	}
}

/*
  event loop handling using epoll
*/
static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
{
	int ret, i;
#define MAXEVENTS 1
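	/*
	 * Only one event is fetched per epoll_wait() call: the handler
	 * invoked below may free or modify other tevent_fd structures,
	 * which would invalidate later entries in the events array
	 * (note the break after the handler call).
	 */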
	struct epoll_event events[MAXEVENTS];
	int timeout = -1;
	int wait_errno;

	if (tvalp) {
		/* it's better to trigger timed events a bit later than too early */
		timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
	}

	if (epoll_ev->ev->signal_events &&
	    tevent_common_check_signal(epoll_ev->ev)) {
		return 0;
	}

	tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
	ret = epoll_wait(epoll_ev->epoll_fd, events, MAXEVENTS, timeout);
	wait_errno = errno;
	tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

	if (ret == -1 && wait_errno == EINTR && epoll_ev->ev->signal_events) {
		if (tevent_common_check_signal(epoll_ev->ev)) {
			return 0;
		}
	}

	if (ret == -1 && wait_errno != EINTR) {
		epoll_panic(epoll_ev, "epoll_wait() failed", true);
		return -1;
	}

	if (ret == 0 && tvalp) {
		/* we don't care about a possible delay here */
		tevent_common_loop_timer_delay(epoll_ev->ev);
		return 0;
	}

	for (i=0;i<ret;i++) {
		struct tevent_fd *fde = talloc_get_type(events[i].data.ptr,
							struct tevent_fd);
		uint16_t flags = 0;

		if (fde == NULL) {
			epoll_panic(epoll_ev, "epoll_wait() gave bad data", true);
			return -1;
		}
		if (events[i].events & (EPOLLHUP|EPOLLERR)) {
			fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
			/*
			 * if we only wait for TEVENT_FD_WRITE, we should not tell the
			 * event handler about it, and remove the epoll_event,
			 * as we only report errors when waiting for read events,
			 * to match the select() behavior
			 */
			if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
				epoll_update_event(epoll_ev, fde);
				continue;
			}
			flags |= TEVENT_FD_READ;
		}
		if (events[i].events & EPOLLIN) flags |= TEVENT_FD_READ;
		if (events[i].events & EPOLLOUT) flags |= TEVENT_FD_WRITE;
		if (flags) {
			fde->handler(epoll_ev->ev, fde, flags, fde->private_data);
			break;
		}
	}

	return 0;
}

/*
  create an epoll_event_context structure.
*/
static int epoll_event_context_init(struct tevent_context *ev)
{
	int ret;
	struct epoll_event_context *epoll_ev;

	/*
	 * We might be called during tevent_re_initialise()
	 * which means we need to free our old additional_data.
	 */
	TALLOC_FREE(ev->additional_data);

	epoll_ev = talloc_zero(ev, struct epoll_event_context);
	if (!epoll_ev) return -1;
	epoll_ev->ev = ev;
	epoll_ev->epoll_fd = -1;

	ret = epoll_init_ctx(epoll_ev);
	if (ret != 0) {
		talloc_free(epoll_ev);
		return ret;
	}

	ev->additional_data = epoll_ev;
	return 0;
}

/*
  destroy an fd_event
*/
static int epoll_event_fd_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct epoll_event_context *epoll_ev = NULL;
	bool panic_triggered = false;
	int flags = fde->flags;

	if (ev == NULL) {
		return tevent_common_fd_destructor(fde);
	}

	epoll_ev = talloc_get_type_abort(ev->additional_data,
					 struct epoll_event_context);

	/*
	 * we must remove the event from the list
	 * otherwise a panic fallback handler may
	 * reuse invalid memory
	 */
	DLIST_REMOVE(ev->fd_events, fde);

	epoll_ev->panic_state = &panic_triggered;
	epoll_check_reopen(epoll_ev);
	if (panic_triggered) {
		return tevent_common_fd_destructor(fde);
	}
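
	/*
	 * Temporarily pretend no events are wanted so that
	 * epoll_update_event() drops this fd's registration from the
	 * epoll set before the fde goes away.
	 */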
	fde->flags = 0;
	epoll_update_event(epoll_ev, fde);
	fde->flags = flags;
	if (panic_triggered) {
		return tevent_common_fd_destructor(fde);
	}
	epoll_ev->panic_state = NULL;

	return tevent_common_fd_destructor(fde);
}

/*
  add a fd based event
  return NULL on failure (memory allocation error)
*/
static struct tevent_fd *epoll_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
					    int fd, uint16_t flags,
					    tevent_fd_handler_t handler,
					    void *private_data,
					    const char *handler_name,
					    const char *location)
{
	struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
							       struct epoll_event_context);
	struct tevent_fd *fde;
	bool panic_triggered = false;

	fde = tevent_common_add_fd(ev, mem_ctx, fd, flags,
				   handler, private_data,
				   handler_name, location);
	if (!fde) return NULL;

	talloc_set_destructor(fde, epoll_event_fd_destructor);

	epoll_ev->panic_state = &panic_triggered;
	epoll_check_reopen(epoll_ev);
	if (panic_triggered) {
		return fde;
	}
	epoll_ev->panic_state = NULL;

	epoll_update_event(epoll_ev, fde);

	return fde;
}

/*
  set the fd event flags
*/
static void epoll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
	struct tevent_context *ev;
	struct epoll_event_context *epoll_ev;
	bool panic_triggered = false;

	if (fde->flags == flags) return;

	ev = fde->event_ctx;
	epoll_ev = talloc_get_type(ev->additional_data, struct epoll_event_context);

	fde->flags = flags;

	epoll_ev->panic_state = &panic_triggered;
	epoll_check_reopen(epoll_ev);
	if (panic_triggered) {
		return;
	}
	epoll_ev->panic_state = NULL;

	epoll_update_event(epoll_ev, fde);
}

/*
  do a single event loop using the events defined in ev
*/
static int epoll_event_loop_once(struct tevent_context *ev, const char *location)
{
	struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
							       struct epoll_event_context);
	struct timeval tval;
	bool panic_triggered = false;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return 0;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return 0;
	}

	tval = tevent_common_loop_timer_delay(ev);
	if (tevent_timeval_is_zero(&tval)) {
		return 0;
	}
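
	/*
	 * If the process has forked, reopen the epoll handle before
	 * waiting. A panic during this reopen forces the fallback to
	 * replay the loop iteration, since nothing has been processed
	 * in this iteration yet.
	 */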
	epoll_ev->panic_state = &panic_triggered;
	epoll_ev->panic_force_replay = true;
	epoll_check_reopen(epoll_ev);
	if (panic_triggered) {
		errno = EINVAL;
		return -1;
	}
	epoll_ev->panic_force_replay = false;
	epoll_ev->panic_state = NULL;

	return epoll_event_loop(epoll_ev, &tval);
}

static const struct tevent_ops epoll_event_ops = {
	.context_init		= epoll_event_context_init,
	.add_fd			= epoll_event_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= epoll_event_set_fd_flags,
	.add_timer		= tevent_common_add_timer,
	.schedule_immediate	= tevent_common_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= epoll_event_loop_once,
	.loop_wait		= tevent_common_loop_wait,
};

_PRIVATE_ bool tevent_epoll_init(void)
{
	return tevent_register_backend("epoll", &epoll_event_ops);
}