/*
   Unix SMB/CIFS implementation.

   main select loop and event handling - epoll implementation

   Copyright (C) Andrew Tridgell	2003-2005
   Copyright (C) Stefan Metzmacher	2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
34 struct epoll_event_context
{
35 /* a pointer back to the generic event_context */
36 struct tevent_context
*ev
;
38 /* when using epoll this is the handle from epoll_create */
43 bool panic_force_replay
;
45 bool (*panic_fallback
)(struct tevent_context
*ev
, bool replay
);
48 #ifdef TEST_PANIC_FALLBACK
50 static int epoll_create_panic_fallback(struct epoll_event_context
*epoll_ev
,
53 if (epoll_ev
->panic_fallback
== NULL
) {
54 return epoll_create(size
);
57 /* 50% of the time, fail... */
58 if ((random() % 2) == 0) {
63 return epoll_create(size
);
66 static int epoll_ctl_panic_fallback(struct epoll_event_context
*epoll_ev
,
67 int epfd
, int op
, int fd
,
68 struct epoll_event
*event
)
70 if (epoll_ev
->panic_fallback
== NULL
) {
71 return epoll_ctl(epfd
, op
, fd
, event
);
74 /* 50% of the time, fail... */
75 if ((random() % 2) == 0) {
80 return epoll_ctl(epfd
, op
, fd
, event
);
83 static int epoll_wait_panic_fallback(struct epoll_event_context
*epoll_ev
,
85 struct epoll_event
*events
,
89 if (epoll_ev
->panic_fallback
== NULL
) {
90 return epoll_wait(epfd
, events
, maxevents
, timeout
);
93 /* 50% of the time, fail... */
94 if ((random() % 2) == 0) {
99 return epoll_wait(epfd
, events
, maxevents
, timeout
);
102 #define epoll_create(_size) \
103 epoll_create_panic_fallback(epoll_ev, _size)
104 #define epoll_ctl(_epfd, _op, _fd, _event) \
105 epoll_ctl_panic_fallback(epoll_ev,_epfd, _op, _fd, _event)
106 #define epoll_wait(_epfd, _events, _maxevents, _timeout) \
107 epoll_wait_panic_fallback(epoll_ev, _epfd, _events, _maxevents, _timeout)
111 called to set the panic fallback function.
113 _PRIVATE_
bool tevent_epoll_set_panic_fallback(struct tevent_context
*ev
,
114 bool (*panic_fallback
)(struct tevent_context
*ev
,
117 struct epoll_event_context
*epoll_ev
;
119 if (ev
->additional_data
== NULL
) {
123 epoll_ev
= talloc_get_type(ev
->additional_data
,
124 struct epoll_event_context
);
125 if (epoll_ev
== NULL
) {
128 epoll_ev
->panic_fallback
= panic_fallback
;
133 called when a epoll call fails
135 static void epoll_panic(struct epoll_event_context
*epoll_ev
,
136 const char *reason
, bool replay
)
138 struct tevent_context
*ev
= epoll_ev
->ev
;
139 bool (*panic_fallback
)(struct tevent_context
*ev
, bool replay
);
141 panic_fallback
= epoll_ev
->panic_fallback
;
143 if (epoll_ev
->panic_state
!= NULL
) {
144 *epoll_ev
->panic_state
= true;
147 if (epoll_ev
->panic_force_replay
) {
151 TALLOC_FREE(ev
->additional_data
);
153 if (panic_fallback
== NULL
) {
154 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
155 "%s (%s) replay[%u] - calling abort()\n",
156 reason
, strerror(errno
), (unsigned)replay
);
160 tevent_debug(ev
, TEVENT_DEBUG_WARNING
,
161 "%s (%s) replay[%u] - calling panic_fallback\n",
162 reason
, strerror(errno
), (unsigned)replay
);
164 if (!panic_fallback(ev
, replay
)) {
165 /* Fallback failed. */
166 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
167 "%s (%s) replay[%u] - calling abort()\n",
168 reason
, strerror(errno
), (unsigned)replay
);
174 map from TEVENT_FD_* to EPOLLIN/EPOLLOUT
176 static uint32_t epoll_map_flags(uint16_t flags
)
179 if (flags
& TEVENT_FD_READ
) ret
|= (EPOLLIN
| EPOLLERR
| EPOLLHUP
);
180 if (flags
& TEVENT_FD_WRITE
) ret
|= (EPOLLOUT
| EPOLLERR
| EPOLLHUP
);
187 static int epoll_ctx_destructor(struct epoll_event_context
*epoll_ev
)
189 close(epoll_ev
->epoll_fd
);
190 epoll_ev
->epoll_fd
= -1;
197 static int epoll_init_ctx(struct epoll_event_context
*epoll_ev
)
199 epoll_ev
->epoll_fd
= epoll_create(64);
200 if (epoll_ev
->epoll_fd
== -1) {
201 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_FATAL
,
202 "Failed to create epoll handle.\n");
206 if (!ev_set_close_on_exec(epoll_ev
->epoll_fd
)) {
207 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_WARNING
,
208 "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
211 epoll_ev
->pid
= getpid();
212 talloc_set_destructor(epoll_ev
, epoll_ctx_destructor
);
217 static void epoll_add_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
);
220 reopen the epoll handle when our pid changes
221 see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for an
222 demonstration of why this is needed
224 static void epoll_check_reopen(struct epoll_event_context
*epoll_ev
)
226 struct tevent_fd
*fde
;
227 bool *caller_panic_state
= epoll_ev
->panic_state
;
228 bool panic_triggered
= false;
230 if (epoll_ev
->pid
== getpid()) {
234 close(epoll_ev
->epoll_fd
);
235 epoll_ev
->epoll_fd
= epoll_create(64);
236 if (epoll_ev
->epoll_fd
== -1) {
237 epoll_panic(epoll_ev
, "epoll_create() failed", false);
241 if (!ev_set_close_on_exec(epoll_ev
->epoll_fd
)) {
242 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_WARNING
,
243 "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
246 epoll_ev
->pid
= getpid();
247 epoll_ev
->panic_state
= &panic_triggered
;
248 for (fde
=epoll_ev
->ev
->fd_events
;fde
;fde
=fde
->next
) {
249 epoll_add_event(epoll_ev
, fde
);
250 if (panic_triggered
) {
251 if (caller_panic_state
!= NULL
) {
252 *caller_panic_state
= true;
257 epoll_ev
->panic_state
= NULL
;
260 #define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT (1<<0)
261 #define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR (1<<1)
262 #define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR (1<<2)
265 add the epoll event to the given fd_event
267 static void epoll_add_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
269 struct epoll_event event
;
272 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
274 /* if we don't want events yet, don't add an epoll_event */
275 if (fde
->flags
== 0) return;
278 event
.events
= epoll_map_flags(fde
->flags
);
279 event
.data
.ptr
= fde
;
280 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_ADD
, fde
->fd
, &event
);
282 epoll_panic(epoll_ev
, "EPOLL_CTL_ADD failed", false);
285 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
287 /* only if we want to read we want to tell the event handler about errors */
288 if (fde
->flags
& TEVENT_FD_READ
) {
289 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
294 delete the epoll event for given fd_event
296 static void epoll_del_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
298 struct epoll_event event
;
301 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
303 /* if there's no epoll_event, we don't need to delete it */
304 if (!(fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
)) return;
307 event
.events
= epoll_map_flags(fde
->flags
);
308 event
.data
.ptr
= fde
;
309 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_DEL
, fde
->fd
, &event
);
310 if (ret
!= 0 && errno
== ENOENT
) {
312 * This can happen after a epoll_check_reopen
313 * within epoll_event_fd_destructor.
315 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_TRACE
,
316 "EPOLL_CTL_DEL ignoring ENOENT for fd[%d]\n",
318 } else if (ret
!= 0) {
319 epoll_panic(epoll_ev
, "EPOLL_CTL_DEL failed", false);
322 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
326 change the epoll event to the given fd_event
328 static void epoll_mod_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
330 struct epoll_event event
;
333 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
336 event
.events
= epoll_map_flags(fde
->flags
);
337 event
.data
.ptr
= fde
;
338 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_MOD
, fde
->fd
, &event
);
340 epoll_panic(epoll_ev
, "EPOLL_CTL_MOD failed", false);
344 /* only if we want to read we want to tell the event handler about errors */
345 if (fde
->flags
& TEVENT_FD_READ
) {
346 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
350 static void epoll_change_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
352 bool got_error
= (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR
);
353 bool want_read
= (fde
->flags
& TEVENT_FD_READ
);
354 bool want_write
= (fde
->flags
& TEVENT_FD_WRITE
);
356 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
358 /* there's already an event */
359 if (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
) {
360 if (want_read
|| (want_write
&& !got_error
)) {
361 epoll_mod_event(epoll_ev
, fde
);
365 * if we want to match the select behavior, we need to remove the epoll_event
366 * when the caller isn't interested in events.
368 * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
370 epoll_del_event(epoll_ev
, fde
);
374 /* there's no epoll_event attached to the fde */
375 if (want_read
|| (want_write
&& !got_error
)) {
376 epoll_add_event(epoll_ev
, fde
);
382 event loop handling using epoll
384 static int epoll_event_loop(struct epoll_event_context
*epoll_ev
, struct timeval
*tvalp
)
388 struct epoll_event events
[MAXEVENTS
];
393 /* it's better to trigger timed events a bit later than too early */
394 timeout
= ((tvalp
->tv_usec
+999) / 1000) + (tvalp
->tv_sec
*1000);
397 if (epoll_ev
->ev
->signal_events
&&
398 tevent_common_check_signal(epoll_ev
->ev
)) {
402 tevent_trace_point_callback(epoll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
403 ret
= epoll_wait(epoll_ev
->epoll_fd
, events
, MAXEVENTS
, timeout
);
405 tevent_trace_point_callback(epoll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
407 if (ret
== -1 && wait_errno
== EINTR
&& epoll_ev
->ev
->signal_events
) {
408 if (tevent_common_check_signal(epoll_ev
->ev
)) {
413 if (ret
== -1 && wait_errno
!= EINTR
) {
414 epoll_panic(epoll_ev
, "epoll_wait() failed", true);
418 if (ret
== 0 && tvalp
) {
419 /* we don't care about a possible delay here */
420 tevent_common_loop_timer_delay(epoll_ev
->ev
);
424 for (i
=0;i
<ret
;i
++) {
425 struct tevent_fd
*fde
= talloc_get_type(events
[i
].data
.ptr
,
430 epoll_panic(epoll_ev
, "epoll_wait() gave bad data", true);
433 if (events
[i
].events
& (EPOLLHUP
|EPOLLERR
)) {
434 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR
;
436 * if we only wait for TEVENT_FD_WRITE, we should not tell the
437 * event handler about it, and remove the epoll_event,
438 * as we only report errors when waiting for read events,
439 * to match the select() behavior
441 if (!(fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
)) {
442 epoll_del_event(epoll_ev
, fde
);
445 flags
|= TEVENT_FD_READ
;
447 if (events
[i
].events
& EPOLLIN
) flags
|= TEVENT_FD_READ
;
448 if (events
[i
].events
& EPOLLOUT
) flags
|= TEVENT_FD_WRITE
;
450 fde
->handler(epoll_ev
->ev
, fde
, flags
, fde
->private_data
);
459 create a epoll_event_context structure.
461 static int epoll_event_context_init(struct tevent_context
*ev
)
464 struct epoll_event_context
*epoll_ev
;
467 * We might be called during tevent_re_initialise()
468 * which means we need to free our old additional_data.
470 TALLOC_FREE(ev
->additional_data
);
472 epoll_ev
= talloc_zero(ev
, struct epoll_event_context
);
473 if (!epoll_ev
) return -1;
475 epoll_ev
->epoll_fd
= -1;
477 ret
= epoll_init_ctx(epoll_ev
);
479 talloc_free(epoll_ev
);
483 ev
->additional_data
= epoll_ev
;
490 static int epoll_event_fd_destructor(struct tevent_fd
*fde
)
492 struct tevent_context
*ev
= fde
->event_ctx
;
493 struct epoll_event_context
*epoll_ev
= NULL
;
494 bool panic_triggered
= false;
497 return tevent_common_fd_destructor(fde
);
500 epoll_ev
= talloc_get_type_abort(ev
->additional_data
,
501 struct epoll_event_context
);
504 * we must remove the event from the list
505 * otherwise a panic fallback handler may
506 * reuse invalid memory
508 DLIST_REMOVE(ev
->fd_events
, fde
);
510 epoll_ev
->panic_state
= &panic_triggered
;
511 epoll_check_reopen(epoll_ev
);
512 if (panic_triggered
) {
513 return tevent_common_fd_destructor(fde
);
516 epoll_del_event(epoll_ev
, fde
);
517 if (panic_triggered
) {
518 return tevent_common_fd_destructor(fde
);
520 epoll_ev
->panic_state
= NULL
;
522 return tevent_common_fd_destructor(fde
);
527 return NULL on failure (memory allocation error)
529 static struct tevent_fd
*epoll_event_add_fd(struct tevent_context
*ev
, TALLOC_CTX
*mem_ctx
,
530 int fd
, uint16_t flags
,
531 tevent_fd_handler_t handler
,
533 const char *handler_name
,
534 const char *location
)
536 struct epoll_event_context
*epoll_ev
= talloc_get_type(ev
->additional_data
,
537 struct epoll_event_context
);
538 struct tevent_fd
*fde
;
539 bool panic_triggered
= false;
541 fde
= tevent_common_add_fd(ev
, mem_ctx
, fd
, flags
,
542 handler
, private_data
,
543 handler_name
, location
);
544 if (!fde
) return NULL
;
546 talloc_set_destructor(fde
, epoll_event_fd_destructor
);
548 epoll_ev
->panic_state
= &panic_triggered
;
549 epoll_check_reopen(epoll_ev
);
550 if (panic_triggered
) {
553 epoll_ev
->panic_state
= NULL
;
555 epoll_add_event(epoll_ev
, fde
);
561 set the fd event flags
563 static void epoll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
565 struct tevent_context
*ev
;
566 struct epoll_event_context
*epoll_ev
;
567 bool panic_triggered
= false;
569 if (fde
->flags
== flags
) return;
572 epoll_ev
= talloc_get_type(ev
->additional_data
, struct epoll_event_context
);
576 epoll_ev
->panic_state
= &panic_triggered
;
577 epoll_check_reopen(epoll_ev
);
578 if (panic_triggered
) {
581 epoll_ev
->panic_state
= NULL
;
583 epoll_change_event(epoll_ev
, fde
);
587 do a single event loop using the events defined in ev
589 static int epoll_event_loop_once(struct tevent_context
*ev
, const char *location
)
591 struct epoll_event_context
*epoll_ev
= talloc_get_type(ev
->additional_data
,
592 struct epoll_event_context
);
594 bool panic_triggered
= false;
596 if (ev
->signal_events
&&
597 tevent_common_check_signal(ev
)) {
601 if (ev
->immediate_events
&&
602 tevent_common_loop_immediate(ev
)) {
606 tval
= tevent_common_loop_timer_delay(ev
);
607 if (tevent_timeval_is_zero(&tval
)) {
611 epoll_ev
->panic_state
= &panic_triggered
;
612 epoll_ev
->panic_force_replay
= true;
613 epoll_check_reopen(epoll_ev
);
614 if (panic_triggered
) {
618 epoll_ev
->panic_force_replay
= false;
619 epoll_ev
->panic_state
= NULL
;
621 return epoll_event_loop(epoll_ev
, &tval
);
624 static const struct tevent_ops epoll_event_ops
= {
625 .context_init
= epoll_event_context_init
,
626 .add_fd
= epoll_event_add_fd
,
627 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
628 .get_fd_flags
= tevent_common_fd_get_flags
,
629 .set_fd_flags
= epoll_event_set_fd_flags
,
630 .add_timer
= tevent_common_add_timer
,
631 .schedule_immediate
= tevent_common_schedule_immediate
,
632 .add_signal
= tevent_common_add_signal
,
633 .loop_once
= epoll_event_loop_once
,
634 .loop_wait
= tevent_common_loop_wait
,
637 _PRIVATE_
bool tevent_epoll_init(void)
639 return tevent_register_backend("epoll", &epoll_event_ops
);