/*
   Unix SMB/CIFS implementation.

   main select loop and event handling - epoll implementation

   Copyright (C) Andrew Tridgell	2003-2005
   Copyright (C) Stefan Metzmacher	2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"

struct epoll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/* when using epoll this is the handle from epoll_create */
	int epoll_fd;

	pid_t pid;

	bool (*panic_fallback)(struct tevent_context *ev, bool replay);
};

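/*
 * Note: "pid" records the process that created epoll_fd; epoll_check_reopen()
 * compares it against getpid() so that a forked child detects the stale epoll
 * handle and recreates it.  "panic_fallback" is optional and is only invoked
 * from epoll_panic() when an epoll call fails unexpectedly.
 */
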
#ifdef TEST_PANIC_FALLBACK

static int epoll_wait_panic_fallback(int epfd,
				     struct epoll_event *events,
				     int maxevents,
				     int timeout)
{
	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_wait(epfd, events, maxevents, timeout);
}

#define epoll_wait epoll_wait_panic_fallback
#endif

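/*
 * When built with -DTEST_PANIC_FALLBACK, the wrapper above makes epoll_wait()
 * fail randomly with EINVAL, which exercises the epoll_panic()/panic_fallback
 * path below without needing a real kernel failure.
 */
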
/*
  called to set the panic fallback function.
*/
_PRIVATE_ bool tevent_epoll_set_panic_fallback(struct tevent_context *ev,
				bool (*panic_fallback)(struct tevent_context *ev,
						       bool replay))
{
	struct epoll_event_context *epoll_ev;

	if (ev->additional_data == NULL) {
		return false;
	}

	epoll_ev = talloc_get_type(ev->additional_data,
				   struct epoll_event_context);
	if (epoll_ev == NULL) {
		return false;
	}
	epoll_ev->panic_fallback = panic_fallback;
	return true;
}

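/*
 * Contract of the panic fallback, as used by epoll_panic() below: it is
 * called when an epoll_ctl()/epoll_wait() call fails unexpectedly.  "replay"
 * is true when the failed operation was epoll_wait(), i.e. the event wait
 * should be retried by whatever mechanism the fallback switches to, and false
 * for EPOLL_CTL_ADD/MOD failures.  If the fallback returns false (or none is
 * installed), epoll_panic() aborts the process.  A wrapper backend can use
 * this hook to fall back to poll/select style event handling.
 */
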
/*
  called when an epoll call fails
*/
static void epoll_panic(struct epoll_event_context *epoll_ev,
			const char *reason, bool replay)
{
	struct tevent_context *ev = epoll_ev->ev;

	if (epoll_ev->panic_fallback == NULL) {
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "%s (%s) replay[%u] - calling abort()\n",
			     reason, strerror(errno), (unsigned)replay);
		abort();
	}

	tevent_debug(ev, TEVENT_DEBUG_WARNING,
		     "%s (%s) replay[%u] - calling panic_fallback\n",
		     reason, strerror(errno), (unsigned)replay);

	if (!epoll_ev->panic_fallback(ev, replay)) {
		/* Fallback failed. */
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "%s (%s) replay[%u] - calling abort()\n",
			     reason, strerror(errno), (unsigned)replay);
		abort();
	}
}

/*
  map from TEVENT_FD_* to EPOLLIN/EPOLLOUT
*/
static uint32_t epoll_map_flags(uint16_t flags)
{
	uint32_t ret = 0;
	if (flags & TEVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP);
	if (flags & TEVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP);
	return ret;
}

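/*
 * Note: per epoll(7), EPOLLERR and EPOLLHUP are always reported even when not
 * requested in the event mask, so setting them here mainly documents intent;
 * the REPORT_ERROR flag below decides whether such conditions are actually
 * delivered to the handler.
 */
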
/*
  free the epoll fd
*/
static int epoll_ctx_destructor(struct epoll_event_context *epoll_ev)
{
	close(epoll_ev->epoll_fd);
	epoll_ev->epoll_fd = -1;
	return 0;
}

/*
  init the epoll fd
*/
static int epoll_init_ctx(struct epoll_event_context *epoll_ev)
{
	epoll_ev->epoll_fd = epoll_create(64);
	if (epoll_ev->epoll_fd == -1) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
			     "Failed to create epoll handle.\n");
		return -1;
	}

	if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
			     "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
	}

	epoll_ev->pid = getpid();
	talloc_set_destructor(epoll_ev, epoll_ctx_destructor);

	return 0;
}

static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde);

/*
  reopen the epoll handle when our pid changes
  see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for a
  demonstration of why this is needed
*/
static void epoll_check_reopen(struct epoll_event_context *epoll_ev)
{
	struct tevent_fd *fde;

	if (epoll_ev->pid == getpid()) {
		return;
	}

	close(epoll_ev->epoll_fd);
	epoll_ev->epoll_fd = epoll_create(64);
	if (epoll_ev->epoll_fd == -1) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
			     "Failed to recreate epoll handle after fork\n");
		return;
	}

	if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
			     "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
	}

	epoll_ev->pid = getpid();
	for (fde=epoll_ev->ev->fd_events;fde;fde=fde->next) {
		epoll_add_event(epoll_ev, fde);
	}
}

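/*
 * Background: after fork() the epoll file descriptor still refers to the same
 * kernel epoll instance in parent and child, so changes made by one process
 * would affect the other.  To keep the child's event handling independent,
 * the function above creates a fresh epoll handle in the child and
 * re-registers every known fd event on it, keyed off the stored pid.
 */
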
#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT	(1<<0)
#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR	(1<<1)
#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR	(1<<2)

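/*
 * Meaning of the additional_flags bits, derived from their use below:
 *   HAS_EVENT    - an epoll_event is currently registered for this fde via
 *                  EPOLL_CTL_ADD.
 *   REPORT_ERROR - EPOLLERR/EPOLLHUP should be passed on to the handler
 *                  (only set when the caller asked for TEVENT_FD_READ).
 *   GOT_ERROR    - an EPOLLERR/EPOLLHUP condition has already been seen on
 *                  this fde.
 */
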
/*
  add the epoll event to the given fd_event
*/
static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	struct epoll_event event;

	if (epoll_ev->epoll_fd == -1) return;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	/* if we don't want events yet, don't add an epoll_event */
	if (fde->flags == 0) return;

	ZERO_STRUCT(event);
	event.events = epoll_map_flags(fde->flags);
	event.data.ptr = fde;
	if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event) != 0) {
		epoll_panic(epoll_ev, "EPOLL_CTL_ADD failed", false);
		return;
	}
	fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;

	/* we only tell the event handler about errors if we are waiting for read events */
	if (fde->flags & TEVENT_FD_READ) {
		fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
	}
}

/*
  delete the epoll event for given fd_event
*/
static void epoll_del_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	struct epoll_event event;

	if (epoll_ev->epoll_fd == -1) return;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	/* if there's no epoll_event, we don't need to delete it */
	if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) return;

	ZERO_STRUCT(event);
	event.events = epoll_map_flags(fde->flags);
	event.data.ptr = fde;
	if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event) != 0) {
		tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
			     "epoll_del_event failed! probable early close bug (%s)\n",
			     strerror(errno));
	}
	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
}

/*
  change the epoll event for the given fd_event
*/
static void epoll_mod_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	struct epoll_event event;
	if (epoll_ev->epoll_fd == -1) return;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	ZERO_STRUCT(event);
	event.events = epoll_map_flags(fde->flags);
	event.data.ptr = fde;
	if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event) != 0) {
		epoll_panic(epoll_ev, "EPOLL_CTL_MOD failed", false);
		return;
	}

	/* we only tell the event handler about errors if we are waiting for read events */
	if (fde->flags & TEVENT_FD_READ) {
		fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
	}
}

static void epoll_change_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
	bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
	bool want_read = (fde->flags & TEVENT_FD_READ);
	bool want_write= (fde->flags & TEVENT_FD_WRITE);

	if (epoll_ev->epoll_fd == -1) return;

	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

	/* there's already an event */
	if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
		if (want_read || (want_write && !got_error)) {
			epoll_mod_event(epoll_ev, fde);
			return;
		}
		/*
		 * if we want to match the select behavior, we need to remove the epoll_event
		 * when the caller isn't interested in events.
		 *
		 * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
		 */
		epoll_del_event(epoll_ev, fde);
		return;
	}

	/* there's no epoll_event attached to the fde */
	if (want_read || (want_write && !got_error)) {
		epoll_add_event(epoll_ev, fde);
		return;
	}
}

/*
  event loop handling using epoll
*/
static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
{
	int ret, i;
#define MAXEVENTS 1
	struct epoll_event events[MAXEVENTS];
	int timeout = -1;

	if (epoll_ev->epoll_fd == -1) return -1;

	if (tvalp) {
		/* it's better to trigger timed events a bit later than too early */
		timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
	}

	if (epoll_ev->ev->signal_events &&
	    tevent_common_check_signal(epoll_ev->ev)) {
		return 0;
	}

	tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
	ret = epoll_wait(epoll_ev->epoll_fd, events, MAXEVENTS, timeout);
	tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

	if (ret == -1 && errno == EINTR && epoll_ev->ev->signal_events) {
		if (tevent_common_check_signal(epoll_ev->ev)) {
			return 0;
		}
	}

	if (ret == -1 && errno != EINTR) {
		epoll_panic(epoll_ev, "epoll_wait() failed", true);
		return -1;
	}

	if (ret == 0 && tvalp) {
		/* we don't care about a possible delay here */
		tevent_common_loop_timer_delay(epoll_ev->ev);
		return 0;
	}

	for (i=0;i<ret;i++) {
		struct tevent_fd *fde = talloc_get_type(events[i].data.ptr,
							struct tevent_fd);
		uint16_t flags = 0;

		if (fde == NULL) {
			epoll_panic(epoll_ev, "epoll_wait() gave bad data", true);
			return -1;
		}
		if (events[i].events & (EPOLLHUP|EPOLLERR)) {
			fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
			/*
			 * if we only wait for TEVENT_FD_WRITE, we should not tell the
			 * event handler about it, and remove the epoll_event,
			 * as we only report errors when waiting for read events,
			 * to match the select() behavior
			 */
			if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
				epoll_del_event(epoll_ev, fde);
				continue;
			}
			flags |= TEVENT_FD_READ;
		}
		if (events[i].events & EPOLLIN) flags |= TEVENT_FD_READ;
		if (events[i].events & EPOLLOUT) flags |= TEVENT_FD_WRITE;
		if (flags) {
			fde->handler(epoll_ev->ev, fde, flags, fde->private_data);
			break;
		}
	}

	return 0;
}

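/*
 * Two details of the loop above worth spelling out:
 *
 *   - The timeout is rounded up to whole milliseconds: a struct timeval of
 *     { .tv_sec = 0, .tv_usec = 1 } becomes a 1ms timeout rather than 0ms,
 *     so epoll_wait() never returns before the timer is actually due.
 *
 *   - MAXEVENTS is 1, so at most one fd event is dispatched per call, and the
 *     loop breaks right after invoking the handler; the handler may free the
 *     fde or change the registered events, so no epoll_event entries are
 *     touched after the callback runs.
 */
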
/*
  create an epoll_event_context structure.
*/
static int epoll_event_context_init(struct tevent_context *ev)
{
	int ret;
	struct epoll_event_context *epoll_ev;

	/*
	 * We might be called during tevent_re_initialise()
	 * which means we need to free our old additional_data.
	 */
	TALLOC_FREE(ev->additional_data);

	epoll_ev = talloc_zero(ev, struct epoll_event_context);
	if (!epoll_ev) return -1;
	epoll_ev->ev = ev;
	epoll_ev->epoll_fd = -1;

	ret = epoll_init_ctx(epoll_ev);
	if (ret != 0) {
		talloc_free(epoll_ev);
		return ret;
	}

	ev->additional_data = epoll_ev;
	return 0;
}

/*
  destroy an fd_event
*/
static int epoll_event_fd_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct epoll_event_context *epoll_ev = NULL;

	if (ev) {
		epoll_ev = talloc_get_type(ev->additional_data,
					   struct epoll_event_context);

		epoll_check_reopen(epoll_ev);

		epoll_del_event(epoll_ev, fde);
	}

	return tevent_common_fd_destructor(fde);
}

/*
  add an fd based event
  return NULL on failure (memory allocation error)
*/
static struct tevent_fd *epoll_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
					    int fd, uint16_t flags,
					    tevent_fd_handler_t handler,
					    void *private_data,
					    const char *handler_name,
					    const char *location)
{
	struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
							       struct epoll_event_context);
	struct tevent_fd *fde;

	epoll_check_reopen(epoll_ev);

	fde = tevent_common_add_fd(ev, mem_ctx, fd, flags,
				   handler, private_data,
				   handler_name, location);
	if (!fde) return NULL;

	talloc_set_destructor(fde, epoll_event_fd_destructor);

	epoll_add_event(epoll_ev, fde);

	return fde;
}

/*
  set the fd event flags
*/
static void epoll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
	struct tevent_context *ev;
	struct epoll_event_context *epoll_ev;

	if (fde->flags == flags) return;

	ev = fde->event_ctx;
	epoll_ev = talloc_get_type(ev->additional_data, struct epoll_event_context);

	fde->flags = flags;

	epoll_check_reopen(epoll_ev);

	epoll_change_event(epoll_ev, fde);
}

/*
  do a single event loop using the events defined in ev
*/
static int epoll_event_loop_once(struct tevent_context *ev, const char *location)
{
	struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
							       struct epoll_event_context);
	struct timeval tval;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return 0;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return 0;
	}

	tval = tevent_common_loop_timer_delay(ev);
	if (tevent_timeval_is_zero(&tval)) {
		return 0;
	}

	epoll_check_reopen(epoll_ev);

	return epoll_event_loop(epoll_ev, &tval);
}

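/*
 * Per-iteration ordering, as implemented above: pending signals are handled
 * first, then immediate events, then any timer that is already due (a zero
 * delay from tevent_common_loop_timer_delay() indicates a timer event was
 * just handled), and only then does the loop block in epoll_wait() for fd
 * activity, using the time until the next timer as the timeout.
 */
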
static const struct tevent_ops epoll_event_ops = {
	.context_init		= epoll_event_context_init,
	.add_fd			= epoll_event_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= epoll_event_set_fd_flags,
	.add_timer		= tevent_common_add_timer,
	.schedule_immediate	= tevent_common_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= epoll_event_loop_once,
	.loop_wait		= tevent_common_loop_wait,
};

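/*
 * Only the fd-related operations are epoll specific; timers, immediates,
 * signals and the outer loop_wait all reuse the generic tevent_common_*
 * implementations shared with the other backends.
 */
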
_PRIVATE_ bool tevent_epoll_init(void)
{
	return tevent_register_backend("epoll", &epoll_event_ops);
}

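/*
 * Usage sketch (illustrative only, not part of this file): an application
 * normally selects this backend by name through the public tevent API, e.g.
 *
 *	tevent_set_default_backend("epoll");
 *	struct tevent_context *ev = tevent_context_init(mem_ctx);
 *
 * or directly:
 *
 *	struct tevent_context *ev = tevent_context_init_byname(mem_ctx, "epoll");
 *
 * See tevent.h for the authoritative prototypes.
 */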