tevent: add epoll_{create,ctl}_panic_fallback() for testing
lib/tevent/tevent_epoll.c
/*
   Unix SMB/CIFS implementation.

   main select loop and event handling - epoll implementation

   Copyright (C) Andrew Tridgell	2003-2005
   Copyright (C) Stefan Metzmacher	2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
struct epoll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/* when using epoll this is the handle from epoll_create */
	int epoll_fd;

	/* the pid the epoll_fd was created under, used to detect fork() */
	pid_t pid;

	/* optional fallback invoked by epoll_panic() instead of abort() */
	bool (*panic_fallback)(struct tevent_context *ev, bool replay);
};
#ifdef TEST_PANIC_FALLBACK

static int epoll_create_panic_fallback(struct epoll_event_context *epoll_ev,
				       int size)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_create(size);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_create(size);
}

static int epoll_ctl_panic_fallback(struct epoll_event_context *epoll_ev,
				    int epfd, int op, int fd,
				    struct epoll_event *event)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_ctl(epfd, op, fd, event);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_ctl(epfd, op, fd, event);
}

static int epoll_wait_panic_fallback(struct epoll_event_context *epoll_ev,
				     int epfd,
				     struct epoll_event *events,
				     int maxevents,
				     int timeout)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_wait(epfd, events, maxevents, timeout);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_wait(epfd, events, maxevents, timeout);
}

#define epoll_create(_size) \
	epoll_create_panic_fallback(epoll_ev, _size)
#define epoll_ctl(_epfd, _op, _fd, _event) \
	epoll_ctl_panic_fallback(epoll_ev, _epfd, _op, _fd, _event)
#define epoll_wait(_epfd, _events, _maxevents, _timeout) \
	epoll_wait_panic_fallback(epoll_ev, _epfd, _events, _maxevents, _timeout)

#endif
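/*
 * Note: when TEST_PANIC_FALLBACK is defined, the macros above redirect every
 * epoll_create()/epoll_ctl()/epoll_wait() call in the remainder of this file
 * to the *_panic_fallback() wrappers, which fail roughly half of the time
 * once a panic fallback has been registered.  This exists purely to exercise
 * the epoll_panic() fallback path in tests.
 */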
/*
  called to set the panic fallback function.
*/
_PRIVATE_ bool tevent_epoll_set_panic_fallback(struct tevent_context *ev,
			bool (*panic_fallback)(struct tevent_context *ev,
					       bool replay))
{
	struct epoll_event_context *epoll_ev;

	if (ev->additional_data == NULL) {
		return false;
	}

	epoll_ev = talloc_get_type(ev->additional_data,
				   struct epoll_event_context);
	if (epoll_ev == NULL) {
		return false;
	}

	epoll_ev->panic_fallback = panic_fallback;
	return true;
}
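/*
 * Illustrative sketch (not part of this file, names are hypothetical): a
 * caller could register a fallback that migrates the context to another
 * backend; the replay argument tells it whether the interrupted wait needs
 * to be re-run:
 *
 *   static bool example_panic_fallback(struct tevent_context *ev, bool replay)
 *   {
 *           // migrate ev to a different backend and re-add the fd events
 *           return true;   // "handled", so epoll_panic() will not abort()
 *   }
 *
 *   tevent_epoll_set_panic_fallback(ev, example_panic_fallback);
 */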
/*
  called when an epoll call fails
*/
static void epoll_panic(struct epoll_event_context *epoll_ev,
			const char *reason, bool replay)
{
	struct tevent_context *ev = epoll_ev->ev;

	if (epoll_ev->panic_fallback == NULL) {
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "%s (%s) replay[%u] - calling abort()\n",
			     reason, strerror(errno), (unsigned)replay);
		abort();
	}

	tevent_debug(ev, TEVENT_DEBUG_WARNING,
		     "%s (%s) replay[%u] - calling panic_fallback\n",
		     reason, strerror(errno), (unsigned)replay);

	if (!epoll_ev->panic_fallback(ev, replay)) {
		/* Fallback failed. */
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "%s (%s) replay[%u] - calling abort()\n",
			     reason, strerror(errno), (unsigned)replay);
		abort();
	}
}
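/*
 * Note: epoll_panic() is invoked with replay=false when registering events
 * fails (EPOLL_CTL_ADD/EPOLL_CTL_MOD below) and with replay=true when
 * epoll_wait() itself fails, so a registered fallback can tell whether the
 * pending wait still needs to be replayed.
 */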
/*
  map from TEVENT_FD_* to EPOLLIN/EPOLLOUT
*/
static uint32_t epoll_map_flags(uint16_t flags)
{
	uint32_t ret = 0;

	if (flags & TEVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP);
	if (flags & TEVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP);

	return ret;
}
/*
 free the epoll fd
*/
static int epoll_ctx_destructor(struct epoll_event_context *epoll_ev)
{
	close(epoll_ev->epoll_fd);
	epoll_ev->epoll_fd = -1;
	return 0;
}
/*
 init the epoll fd
*/
static int epoll_init_ctx(struct epoll_event_context *epoll_ev)
{
	epoll_ev->epoll_fd = epoll_create(64);
	if (epoll_ev->epoll_fd == -1) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
			     "Failed to create epoll handle.\n");
		return -1;
	}

	if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
			     "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
	}

	epoll_ev->pid = getpid();
	talloc_set_destructor(epoll_ev, epoll_ctx_destructor);

	return 0;
}
static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde);
/*
  reopen the epoll handle when our pid changes
  see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for a
  demonstration of why this is needed
 */
static void epoll_check_reopen(struct epoll_event_context *epoll_ev)
{
	struct tevent_fd *fde;

	if (epoll_ev->pid == getpid()) {
		return;
	}

	close(epoll_ev->epoll_fd);
	epoll_ev->epoll_fd = epoll_create(64);
	if (epoll_ev->epoll_fd == -1) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
			     "Failed to recreate epoll handle after fork\n");
		return;
	}

	if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
			     "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
	}

	epoll_ev->pid = getpid();
	for (fde = epoll_ev->ev->fd_events; fde; fde = fde->next) {
		epoll_add_event(epoll_ev, fde);
	}
}
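/*
 * Background: after fork() the child shares the parent's epoll instance, so
 * any epoll_ctl() it performs would also be visible to the parent.  The
 * backend therefore remembers the pid the epoll fd was created under and, on
 * the first use after a pid change, closes the inherited fd, creates a fresh
 * one and re-adds all known fd events.
 */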
#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT	(1<<0)
#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR	(1<<1)
#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR	(1<<2)
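/*
 * Meaning of the per-fde bits kept in fde->additional_flags:
 *   HAS_EVENT    - an epoll_event is currently registered for this fde
 *   REPORT_ERROR - EPOLLERR/EPOLLHUP should be passed to the handler
 *                  (only set while the caller wants TEVENT_FD_READ)
 *   GOT_ERROR    - an EPOLLERR/EPOLLHUP condition has been seen on this fde
 */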
/*
 add the epoll event to the given fd_event
*/
static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	struct epoll_event event;

	if (epoll_ev->epoll_fd == -1) return;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	/* if we don't want events yet, don't add an epoll_event */
	if (fde->flags == 0) return;

	ZERO_STRUCT(event);
	event.events = epoll_map_flags(fde->flags);
	event.data.ptr = fde;
	if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event) != 0) {
		epoll_panic(epoll_ev, "EPOLL_CTL_ADD failed", false);
		return;
	}
	fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;

	/* only if we want to read do we tell the event handler about errors */
	if (fde->flags & TEVENT_FD_READ) {
		fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
	}
}
/*
 delete the epoll event for the given fd_event
*/
static void epoll_del_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	struct epoll_event event;

	if (epoll_ev->epoll_fd == -1) return;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	/* if there's no epoll_event, we don't need to delete it */
	if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) return;

	ZERO_STRUCT(event);
	event.events = epoll_map_flags(fde->flags);
	event.data.ptr = fde;
	if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event) != 0) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
			     "epoll_del_event failed! probable early close bug (%s)\n",
			     strerror(errno));
	}
	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
}
/*
 change the epoll event for the given fd_event
*/
static void epoll_mod_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	struct epoll_event event;

	if (epoll_ev->epoll_fd == -1) return;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	ZERO_STRUCT(event);
	event.events = epoll_map_flags(fde->flags);
	event.data.ptr = fde;
	if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event) != 0) {
		epoll_panic(epoll_ev, "EPOLL_CTL_MOD failed", false);
		return;
	}

	/* only if we want to read do we tell the event handler about errors */
	if (fde->flags & TEVENT_FD_READ) {
		fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
	}
}
static void epoll_change_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
	bool want_read = (fde->flags & TEVENT_FD_READ);
	bool want_write = (fde->flags & TEVENT_FD_WRITE);

	if (epoll_ev->epoll_fd == -1) return;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	/* there's already an event */
	if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
		if (want_read || (want_write && !got_error)) {
			epoll_mod_event(epoll_ev, fde);
			return;
		}
		/*
		 * if we want to match the select() behavior, we need to remove
		 * the epoll_event when the caller isn't interested in events.
		 *
		 * this is because epoll reports EPOLLERR and EPOLLHUP, even
		 * without asking for them
		 */
		epoll_del_event(epoll_ev, fde);
		return;
	}

	/* there's no epoll_event attached to the fde */
	if (want_read || (want_write && !got_error)) {
		epoll_add_event(epoll_ev, fde);
		return;
	}
}
/*
  event loop handling using epoll
*/
static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
{
	int ret, i;
#define MAXEVENTS 1
	struct epoll_event events[MAXEVENTS];
	int timeout = -1;
	int wait_errno;

	if (epoll_ev->epoll_fd == -1) return -1;

	if (tvalp) {
		/* it's better to trigger timed events a bit later than too early */
		timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
	}

	if (epoll_ev->ev->signal_events &&
	    tevent_common_check_signal(epoll_ev->ev)) {
		return 0;
	}

	tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
	ret = epoll_wait(epoll_ev->epoll_fd, events, MAXEVENTS, timeout);
	wait_errno = errno;
	tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

	if (ret == -1 && wait_errno == EINTR && epoll_ev->ev->signal_events) {
		if (tevent_common_check_signal(epoll_ev->ev)) {
			return 0;
		}
	}

	if (ret == -1 && wait_errno != EINTR) {
		epoll_panic(epoll_ev, "epoll_wait() failed", true);
		return -1;
	}

	if (ret == 0 && tvalp) {
		/* we don't care about a possible delay here */
		tevent_common_loop_timer_delay(epoll_ev->ev);
		return 0;
	}

	for (i = 0; i < ret; i++) {
		struct tevent_fd *fde = talloc_get_type(events[i].data.ptr,
							struct tevent_fd);
		uint16_t flags = 0;

		if (fde == NULL) {
			epoll_panic(epoll_ev, "epoll_wait() gave bad data", true);
			return -1;
		}

		if (events[i].events & (EPOLLHUP|EPOLLERR)) {
			fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
			/*
			 * if we only wait for TEVENT_FD_WRITE, we should not
			 * tell the event handler about it, and remove the
			 * epoll_event, as we only report errors when waiting
			 * for read events, to match the select() behavior
			 */
			if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
				epoll_del_event(epoll_ev, fde);
				continue;
			}
			flags |= TEVENT_FD_READ;
		}
		if (events[i].events & EPOLLIN) flags |= TEVENT_FD_READ;
		if (events[i].events & EPOLLOUT) flags |= TEVENT_FD_WRITE;
		if (flags) {
			fde->handler(epoll_ev->ev, fde, flags, fde->private_data);
			break;
		}
	}

	return 0;
}
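/*
 * Note: MAXEVENTS is 1 and the dispatch loop above breaks after invoking the
 * first handler, so at most one fd handler runs per epoll_event_loop() call;
 * remaining events are picked up by the next epoll_wait().
 */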
/*
  create an epoll_event_context structure.
*/
static int epoll_event_context_init(struct tevent_context *ev)
{
	int ret;
	struct epoll_event_context *epoll_ev;

	/*
	 * We might be called during tevent_re_initialise()
	 * which means we need to free our old additional_data.
	 */
	TALLOC_FREE(ev->additional_data);

	epoll_ev = talloc_zero(ev, struct epoll_event_context);
	if (!epoll_ev) return -1;
	epoll_ev->ev = ev;
	epoll_ev->epoll_fd = -1;

	ret = epoll_init_ctx(epoll_ev);
	if (ret != 0) {
		talloc_free(epoll_ev);
		return ret;
	}

	ev->additional_data = epoll_ev;
	return 0;
}
/*
  destroy an fd_event
*/
static int epoll_event_fd_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct epoll_event_context *epoll_ev = NULL;

	if (ev) {
		epoll_ev = talloc_get_type(ev->additional_data,
					   struct epoll_event_context);
		epoll_check_reopen(epoll_ev);
		epoll_del_event(epoll_ev, fde);
	}

	return tevent_common_fd_destructor(fde);
}
/*
  add a fd based event
  return NULL on failure (memory allocation error)
*/
static struct tevent_fd *epoll_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
					    int fd, uint16_t flags,
					    tevent_fd_handler_t handler,
					    void *private_data,
					    const char *handler_name,
					    const char *location)
{
	struct epoll_event_context *epoll_ev =
		talloc_get_type(ev->additional_data,
				struct epoll_event_context);
	struct tevent_fd *fde;

	epoll_check_reopen(epoll_ev);

	fde = tevent_common_add_fd(ev, mem_ctx, fd, flags,
				   handler, private_data,
				   handler_name, location);
	if (!fde) return NULL;

	talloc_set_destructor(fde, epoll_event_fd_destructor);

	epoll_add_event(epoll_ev, fde);

	return fde;
}
/*
  set the fd event flags
*/
static void epoll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
	struct tevent_context *ev;
	struct epoll_event_context *epoll_ev;

	if (fde->flags == flags) return;

	ev = fde->event_ctx;
	epoll_ev = talloc_get_type(ev->additional_data, struct epoll_event_context);

	fde->flags = flags;

	epoll_check_reopen(epoll_ev);

	epoll_change_event(epoll_ev, fde);
}
/*
  do a single event loop using the events defined in ev
*/
static int epoll_event_loop_once(struct tevent_context *ev, const char *location)
{
	struct epoll_event_context *epoll_ev =
		talloc_get_type(ev->additional_data,
				struct epoll_event_context);
	struct timeval tval;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return 0;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return 0;
	}

	tval = tevent_common_loop_timer_delay(ev);
	if (tevent_timeval_is_zero(&tval)) {
		return 0;
	}

	epoll_check_reopen(epoll_ev);

	return epoll_event_loop(epoll_ev, &tval);
}
static const struct tevent_ops epoll_event_ops = {
	.context_init		= epoll_event_context_init,
	.add_fd			= epoll_event_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= epoll_event_set_fd_flags,
	.add_timer		= tevent_common_add_timer,
	.schedule_immediate	= tevent_common_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= epoll_event_loop_once,
	.loop_wait		= tevent_common_loop_wait,
};
_PRIVATE_ bool tevent_epoll_init(void)
{
	return tevent_register_backend("epoll", &epoll_event_ops);
}
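/*
 * Note: tevent_epoll_init() is called from tevent's backend registration at
 * startup.  An application can then request this backend by name through the
 * public tevent API, e.g. (illustrative):
 *
 *   struct tevent_context *ev = tevent_context_init_byname(mem_ctx, "epoll");
 */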