tevent: ignore EBADF from epoll_ctl() and disable the event
lib/tevent/tevent_epoll.c
/*
   Unix SMB/CIFS implementation.

   main select loop and event handling - epoll implementation

   Copyright (C) Andrew Tridgell        2003-2005
   Copyright (C) Stefan Metzmacher      2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"

struct epoll_event_context {
        /* a pointer back to the generic event_context */
        struct tevent_context *ev;

        /* when using epoll this is the handle from epoll_create */
        int epoll_fd;

        pid_t pid;

        bool panic_force_replay;
        bool *panic_state;
        bool (*panic_fallback)(struct tevent_context *ev, bool replay);
};

#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT	(1<<0)
#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR	(1<<1)
#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR	(1<<2)
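
/*
 * Summary of how the additional_flags bits above are used by the code below:
 * HAS_EVENT means the fd is currently registered with the epoll handle,
 * REPORT_ERROR means EPOLLERR/EPOLLHUP should be passed on to the handler
 * (only set when TEVENT_FD_READ is wanted), and GOT_ERROR records that an
 * error condition was already seen on the fd.
 */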

#ifdef TEST_PANIC_FALLBACK

static int epoll_create_panic_fallback(struct epoll_event_context *epoll_ev,
                                       int size)
{
        if (epoll_ev->panic_fallback == NULL) {
                return epoll_create(size);
        }

        /* 50% of the time, fail... */
        if ((random() % 2) == 0) {
                errno = EINVAL;
                return -1;
        }

        return epoll_create(size);
}

static int epoll_ctl_panic_fallback(struct epoll_event_context *epoll_ev,
                                    int epfd, int op, int fd,
                                    struct epoll_event *event)
{
        if (epoll_ev->panic_fallback == NULL) {
                return epoll_ctl(epfd, op, fd, event);
        }

        /* 50% of the time, fail... */
        if ((random() % 2) == 0) {
                errno = EINVAL;
                return -1;
        }

        return epoll_ctl(epfd, op, fd, event);
}

static int epoll_wait_panic_fallback(struct epoll_event_context *epoll_ev,
                                     int epfd,
                                     struct epoll_event *events,
                                     int maxevents,
                                     int timeout)
{
        if (epoll_ev->panic_fallback == NULL) {
                return epoll_wait(epfd, events, maxevents, timeout);
        }

        /* 50% of the time, fail... */
        if ((random() % 2) == 0) {
                errno = EINVAL;
                return -1;
        }

        return epoll_wait(epfd, events, maxevents, timeout);
}

#define epoll_create(_size) \
        epoll_create_panic_fallback(epoll_ev, _size)
#define epoll_ctl(_epfd, _op, _fd, _event) \
        epoll_ctl_panic_fallback(epoll_ev, _epfd, _op, _fd, _event)
#define epoll_wait(_epfd, _events, _maxevents, _timeout) \
        epoll_wait_panic_fallback(epoll_ev, _epfd, _events, _maxevents, _timeout)
#endif
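
/*
 * When TEST_PANIC_FALLBACK is defined, the wrappers above replace the real
 * epoll_create()/epoll_ctl()/epoll_wait() calls and fail roughly half of the
 * time with EINVAL once a panic_fallback has been installed. This is purely a
 * test hook to exercise the epoll_panic() path; a test build would define it
 * on the compiler command line, e.g. -DTEST_PANIC_FALLBACK (an assumption
 * about the build, not something set by this file).
 */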

/*
  called to set the panic fallback function.
*/
_PRIVATE_ bool tevent_epoll_set_panic_fallback(struct tevent_context *ev,
                                bool (*panic_fallback)(struct tevent_context *ev,
                                                       bool replay))
{
        struct epoll_event_context *epoll_ev;

        if (ev->additional_data == NULL) {
                return false;
        }

        epoll_ev = talloc_get_type(ev->additional_data,
                                   struct epoll_event_context);
        if (epoll_ev == NULL) {
                return false;
        }
        epoll_ev->panic_fallback = panic_fallback;
        return true;
}
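
/*
 * Hypothetical usage sketch (not part of this file): a caller that wants to
 * react to epoll failures could install a fallback which repairs or replaces
 * its backend state and reports whether the failed wait should be replayed:
 *
 *     static bool my_panic_fallback(struct tevent_context *ev, bool replay)
 *     {
 *             // e.g. re-create backend state here; returning false makes
 *             // epoll_panic() call abort() instead.
 *             return false;
 *     }
 *
 *     tevent_epoll_set_panic_fallback(ev, my_panic_fallback);
 *
 * The names above are illustrative only.
 */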

/*
  called when an epoll call fails
*/
static void epoll_panic(struct epoll_event_context *epoll_ev,
                        const char *reason, bool replay)
{
        struct tevent_context *ev = epoll_ev->ev;
        bool (*panic_fallback)(struct tevent_context *ev, bool replay);

        panic_fallback = epoll_ev->panic_fallback;

        if (epoll_ev->panic_state != NULL) {
                *epoll_ev->panic_state = true;
        }

        if (epoll_ev->panic_force_replay) {
                replay = true;
        }

        TALLOC_FREE(ev->additional_data);

        if (panic_fallback == NULL) {
                tevent_debug(ev, TEVENT_DEBUG_FATAL,
                        "%s (%s) replay[%u] - calling abort()\n",
                        reason, strerror(errno), (unsigned)replay);
                abort();
        }

        tevent_debug(ev, TEVENT_DEBUG_WARNING,
                     "%s (%s) replay[%u] - calling panic_fallback\n",
                     reason, strerror(errno), (unsigned)replay);

        if (!panic_fallback(ev, replay)) {
                /* Fallback failed. */
                tevent_debug(ev, TEVENT_DEBUG_FATAL,
                        "%s (%s) replay[%u] - calling abort()\n",
                        reason, strerror(errno), (unsigned)replay);
                abort();
        }
}

/*
  map from TEVENT_FD_* to EPOLLIN/EPOLLOUT
*/
static uint32_t epoll_map_flags(uint16_t flags)
{
        uint32_t ret = 0;
        if (flags & TEVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP);
        if (flags & TEVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP);
        return ret;
}
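
/*
 * Note: epoll reports EPOLLERR and EPOLLHUP whether or not they are
 * requested; including them in the mapping just makes that explicit. The
 * filtering needed to match select() semantics is done in
 * epoll_update_event() and epoll_event_loop() below.
 */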

/*
 free the epoll fd
*/
static int epoll_ctx_destructor(struct epoll_event_context *epoll_ev)
{
        close(epoll_ev->epoll_fd);
        epoll_ev->epoll_fd = -1;
        return 0;
}

/*
 init the epoll fd
*/
static int epoll_init_ctx(struct epoll_event_context *epoll_ev)
{
        epoll_ev->epoll_fd = epoll_create(64);
        if (epoll_ev->epoll_fd == -1) {
                tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
                             "Failed to create epoll handle.\n");
                return -1;
        }

        if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
                tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
                             "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
        }

        epoll_ev->pid = getpid();
        talloc_set_destructor(epoll_ev, epoll_ctx_destructor);

        return 0;
}

static void epoll_update_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde);

/*
  reopen the epoll handle when our pid changes
  see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for a
  demonstration of why this is needed
 */
static void epoll_check_reopen(struct epoll_event_context *epoll_ev)
{
        struct tevent_fd *fde;
        bool *caller_panic_state = epoll_ev->panic_state;
        bool panic_triggered = false;

        if (epoll_ev->pid == getpid()) {
                return;
        }

        close(epoll_ev->epoll_fd);
        epoll_ev->epoll_fd = epoll_create(64);
        if (epoll_ev->epoll_fd == -1) {
                epoll_panic(epoll_ev, "epoll_create() failed", false);
                return;
        }

        if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
                tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
                             "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
        }

        epoll_ev->pid = getpid();
        epoll_ev->panic_state = &panic_triggered;
        for (fde=epoll_ev->ev->fd_events;fde;fde=fde->next) {
                fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
                epoll_update_event(epoll_ev, fde);
        }
        if (panic_triggered) {
                if (caller_panic_state != NULL) {
                        *caller_panic_state = true;
                }
                return;
        }
        epoll_ev->panic_state = NULL;
}
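
/*
 * Background for epoll_check_reopen(): an epoll file descriptor is shared
 * with the child after fork(), so changes made by the child would also
 * affect the parent's event set (see the junkcode link above for a
 * demonstration). Recording the pid and re-creating the epoll handle plus
 * re-adding all fd events when the pid changes keeps parent and child
 * isolated from each other.
 */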

/*
 add the epoll event to the given fd_event
*/
static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
        struct epoll_event event;
        int ret;

        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

        ZERO_STRUCT(event);
        event.events = epoll_map_flags(fde->flags);
        event.data.ptr = fde;
        ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event);
        if (ret != 0 && errno == EBADF) {
                tevent_debug(epoll_ev->ev, TEVENT_DEBUG_ERROR,
                             "EPOLL_CTL_ADD EBADF for "
                             "fde[%p] fd[%d] - disabling\n",
                             fde, fde->fd);
                DLIST_REMOVE(epoll_ev->ev->fd_events, fde);
                fde->event_ctx = NULL;
                return;
        } else if (ret != 0) {
                epoll_panic(epoll_ev, "EPOLL_CTL_ADD failed", false);
                return;
        }

        fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
        /* only if we want to read we want to tell the event handler about errors */
        if (fde->flags & TEVENT_FD_READ) {
                fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
        }
}
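
/*
 * The EBADF case above covers callers that close their fd before freeing the
 * tevent_fd. Instead of panicking, the fde is unlinked from the list and its
 * event_ctx is cleared, so epoll_event_fd_destructor() takes the early
 * common-destructor path and the event is effectively disabled.
 */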

/*
 delete the epoll event for given fd_event
*/
static void epoll_del_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
        struct epoll_event event;
        int ret;

        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

        ZERO_STRUCT(event);
        ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event);
        if (ret != 0 && errno == ENOENT) {
                /*
                 * This can happen after a epoll_check_reopen
                 * within epoll_event_fd_destructor.
                 */
                tevent_debug(epoll_ev->ev, TEVENT_DEBUG_TRACE,
                             "EPOLL_CTL_DEL ignoring ENOENT for fd[%d]\n",
                             fde->fd);
                return;
        } else if (ret != 0 && errno == EBADF) {
                tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
                             "EPOLL_CTL_DEL EBADF for "
                             "fde[%p] fd[%d] - disabling\n",
                             fde, fde->fd);
                DLIST_REMOVE(epoll_ev->ev->fd_events, fde);
                fde->event_ctx = NULL;
                return;
        } else if (ret != 0) {
                epoll_panic(epoll_ev, "EPOLL_CTL_DEL failed", false);
                return;
        }
}

/*
 change the epoll event to the given fd_event
*/
static void epoll_mod_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
        struct epoll_event event;
        int ret;

        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

        ZERO_STRUCT(event);
        event.events = epoll_map_flags(fde->flags);
        event.data.ptr = fde;
        ret = epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event);
        if (ret != 0 && errno == EBADF) {
                tevent_debug(epoll_ev->ev, TEVENT_DEBUG_ERROR,
                             "EPOLL_CTL_MOD EBADF for "
                             "fde[%p] fd[%d] - disabling\n",
                             fde, fde->fd);
                DLIST_REMOVE(epoll_ev->ev->fd_events, fde);
                fde->event_ctx = NULL;
                return;
        } else if (ret != 0) {
                epoll_panic(epoll_ev, "EPOLL_CTL_MOD failed", false);
                return;
        }

        fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
        /* only if we want to read we want to tell the event handler about errors */
        if (fde->flags & TEVENT_FD_READ) {
                fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
        }
}

static void epoll_update_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
        bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
        bool want_read = (fde->flags & TEVENT_FD_READ);
        bool want_write= (fde->flags & TEVENT_FD_WRITE);

        /* there's already an event */
        if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
                if (want_read || (want_write && !got_error)) {
                        epoll_mod_event(epoll_ev, fde);
                        return;
                }
                /*
                 * if we want to match the select behavior, we need to remove the epoll_event
                 * when the caller isn't interested in events.
                 *
                 * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
                 */
                epoll_del_event(epoll_ev, fde);
                return;
        }

        /* there's no epoll_event attached to the fde */
        if (want_read || (want_write && !got_error)) {
                epoll_add_event(epoll_ev, fde);
                return;
        }
}

/*
  event loop handling using epoll
*/
static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
{
        int ret, i;
#define MAXEVENTS 1
        struct epoll_event events[MAXEVENTS];
        int timeout = -1;
        int wait_errno;

        if (tvalp) {
                /* it's better to trigger timed events a bit later than too early */
                timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
        }

        if (epoll_ev->ev->signal_events &&
            tevent_common_check_signal(epoll_ev->ev)) {
                return 0;
        }

        tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
        ret = epoll_wait(epoll_ev->epoll_fd, events, MAXEVENTS, timeout);
        wait_errno = errno;
        tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

        if (ret == -1 && wait_errno == EINTR && epoll_ev->ev->signal_events) {
                if (tevent_common_check_signal(epoll_ev->ev)) {
                        return 0;
                }
        }

        if (ret == -1 && wait_errno != EINTR) {
                epoll_panic(epoll_ev, "epoll_wait() failed", true);
                return -1;
        }

        if (ret == 0 && tvalp) {
                /* we don't care about a possible delay here */
                tevent_common_loop_timer_delay(epoll_ev->ev);
                return 0;
        }

        for (i=0;i<ret;i++) {
                struct tevent_fd *fde = talloc_get_type(events[i].data.ptr,
                                                        struct tevent_fd);
                uint16_t flags = 0;

                if (fde == NULL) {
                        epoll_panic(epoll_ev, "epoll_wait() gave bad data", true);
                        return -1;
                }
                if (events[i].events & (EPOLLHUP|EPOLLERR)) {
                        fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
                        /*
                         * if we only wait for TEVENT_FD_WRITE, we should not tell the
                         * event handler about it, and remove the epoll_event,
                         * as we only report errors when waiting for read events,
                         * to match the select() behavior
                         */
                        if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
                                epoll_update_event(epoll_ev, fde);
                                continue;
                        }
                        flags |= TEVENT_FD_READ;
                }
                if (events[i].events & EPOLLIN) flags |= TEVENT_FD_READ;
                if (events[i].events & EPOLLOUT) flags |= TEVENT_FD_WRITE;
                if (flags) {
                        fde->handler(epoll_ev->ev, fde, flags, fde->private_data);
                        break;
                }
        }

        return 0;
}
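
/*
 * Note: MAXEVENTS is 1 and the loop breaks after invoking the first handler,
 * so at most one fd handler runs per epoll_event_loop() call. This keeps the
 * iteration safe if a handler frees its tevent_fd or modifies the event
 * lists; any other ready fds are picked up on the next loop_once pass.
 */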

/*
  create an epoll_event_context structure.
*/
static int epoll_event_context_init(struct tevent_context *ev)
{
        int ret;
        struct epoll_event_context *epoll_ev;

        /*
         * We might be called during tevent_re_initialise()
         * which means we need to free our old additional_data.
         */
        TALLOC_FREE(ev->additional_data);

        epoll_ev = talloc_zero(ev, struct epoll_event_context);
        if (!epoll_ev) return -1;
        epoll_ev->ev = ev;
        epoll_ev->epoll_fd = -1;

        ret = epoll_init_ctx(epoll_ev);
        if (ret != 0) {
                talloc_free(epoll_ev);
                return ret;
        }

        ev->additional_data = epoll_ev;
        return 0;
}

/*
  destroy an fd_event
*/
static int epoll_event_fd_destructor(struct tevent_fd *fde)
{
        struct tevent_context *ev = fde->event_ctx;
        struct epoll_event_context *epoll_ev = NULL;
        bool panic_triggered = false;
        int flags = fde->flags;

        if (ev == NULL) {
                return tevent_common_fd_destructor(fde);
        }

        epoll_ev = talloc_get_type_abort(ev->additional_data,
                                         struct epoll_event_context);

        /*
         * we must remove the event from the list
         * otherwise a panic fallback handler may
         * reuse invalid memory
         */
        DLIST_REMOVE(ev->fd_events, fde);

        epoll_ev->panic_state = &panic_triggered;
        epoll_check_reopen(epoll_ev);
        if (panic_triggered) {
                return tevent_common_fd_destructor(fde);
        }

        fde->flags = 0;
        epoll_update_event(epoll_ev, fde);
        fde->flags = flags;
        if (panic_triggered) {
                return tevent_common_fd_destructor(fde);
        }
        epoll_ev->panic_state = NULL;

        return tevent_common_fd_destructor(fde);
}

/*
  add a fd based event
  return NULL on failure (memory allocation error)
*/
static struct tevent_fd *epoll_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
                                            int fd, uint16_t flags,
                                            tevent_fd_handler_t handler,
                                            void *private_data,
                                            const char *handler_name,
                                            const char *location)
{
        struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
                                                               struct epoll_event_context);
        struct tevent_fd *fde;
        bool panic_triggered = false;

        fde = tevent_common_add_fd(ev, mem_ctx, fd, flags,
                                   handler, private_data,
                                   handler_name, location);
        if (!fde) return NULL;

        talloc_set_destructor(fde, epoll_event_fd_destructor);

        epoll_ev->panic_state = &panic_triggered;
        epoll_check_reopen(epoll_ev);
        if (panic_triggered) {
                return fde;
        }
        epoll_ev->panic_state = NULL;

        epoll_update_event(epoll_ev, fde);

        return fde;
}

/*
  set the fd event flags
*/
static void epoll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
        struct tevent_context *ev;
        struct epoll_event_context *epoll_ev;
        bool panic_triggered = false;

        if (fde->flags == flags) return;

        ev = fde->event_ctx;
        epoll_ev = talloc_get_type(ev->additional_data, struct epoll_event_context);

        fde->flags = flags;

        epoll_ev->panic_state = &panic_triggered;
        epoll_check_reopen(epoll_ev);
        if (panic_triggered) {
                return;
        }
        epoll_ev->panic_state = NULL;

        epoll_update_event(epoll_ev, fde);
}

/*
  do a single event loop using the events defined in ev
*/
static int epoll_event_loop_once(struct tevent_context *ev, const char *location)
{
        struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
                                                               struct epoll_event_context);
        struct timeval tval;
        bool panic_triggered = false;

        if (ev->signal_events &&
            tevent_common_check_signal(ev)) {
                return 0;
        }

        if (ev->immediate_events &&
            tevent_common_loop_immediate(ev)) {
                return 0;
        }

        tval = tevent_common_loop_timer_delay(ev);
        if (tevent_timeval_is_zero(&tval)) {
                return 0;
        }

        epoll_ev->panic_state = &panic_triggered;
        epoll_ev->panic_force_replay = true;
        epoll_check_reopen(epoll_ev);
        if (panic_triggered) {
                errno = EINVAL;
                return -1;
        }
        epoll_ev->panic_force_replay = false;
        epoll_ev->panic_state = NULL;

        return epoll_event_loop(epoll_ev, &tval);
}

static const struct tevent_ops epoll_event_ops = {
        .context_init           = epoll_event_context_init,
        .add_fd                 = epoll_event_add_fd,
        .set_fd_close_fn        = tevent_common_fd_set_close_fn,
        .get_fd_flags           = tevent_common_fd_get_flags,
        .set_fd_flags           = epoll_event_set_fd_flags,
        .add_timer              = tevent_common_add_timer,
        .schedule_immediate     = tevent_common_schedule_immediate,
        .add_signal             = tevent_common_add_signal,
        .loop_once              = epoll_event_loop_once,
        .loop_wait              = tevent_common_loop_wait,
};

_PRIVATE_ bool tevent_epoll_init(void)
{
        return tevent_register_backend("epoll", &epoll_event_ops);
}
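
/*
 * Usage sketch (illustrative, not part of this file): once the backend has
 * been registered, an application can request it by name, e.g.:
 *
 *     struct tevent_context *ev = tevent_context_init_byname(NULL, "epoll");
 *
 * tevent_context_init_byname() is the public tevent API for selecting a
 * specific backend; tevent_context_init() picks the default backend instead.
 */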