/*
   Unix SMB/CIFS implementation.

   main select loop and event handling - epoll implementation

   Copyright (C) Andrew Tridgell	2003-2005
   Copyright (C) Stefan Metzmacher	2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "system/network.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
/* private state for the epoll backend, hung off ev->additional_data */
struct epoll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/* when using epoll this is the handle from epoll_create */
	int epoll_fd;

	/* pid at the time the epoll_fd was created; used to detect fork() */
	pid_t pid;

	/* if set, a failed epoll call replays events instead of aborting */
	bool panic_force_replay;
	/* caller-provided flag set to true when a panic was triggered */
	bool *panic_state;
	/* optional recovery hook called instead of abort() on epoll failure */
	bool (*panic_fallback)(struct tevent_context *ev, bool replay);
};

/* per-fde bookkeeping kept in fde->additional_flags */
#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT	(1<<0)
#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR	(1<<1)
#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR	(1<<2)
#ifdef TEST_PANIC_FALLBACK

/*
  test wrapper for epoll_create(): when a panic fallback is registered,
  randomly fail 50% of the time so the fallback path gets exercised.
*/
static int epoll_create_panic_fallback(struct epoll_event_context *epoll_ev,
				       int size)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_create(size);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_create(size);
}

/*
  test wrapper for epoll_ctl(): randomly fail when a fallback is set.
*/
static int epoll_ctl_panic_fallback(struct epoll_event_context *epoll_ev,
				    int epfd, int op, int fd,
				    struct epoll_event *event)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_ctl(epfd, op, fd, event);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_ctl(epfd, op, fd, event);
}

/*
  test wrapper for epoll_wait(): randomly fail when a fallback is set.
*/
static int epoll_wait_panic_fallback(struct epoll_event_context *epoll_ev,
				     int epfd,
				     struct epoll_event *events,
				     int maxevents,
				     int timeout)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_wait(epfd, events, maxevents, timeout);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_wait(epfd, events, maxevents, timeout);
}

/*
  redirect the real epoll calls to the failure-injecting wrappers;
  note the macros rely on a local 'epoll_ev' being in scope at each call site.
*/
#define epoll_create(_size) \
	epoll_create_panic_fallback(epoll_ev, _size)
#define epoll_ctl(_epfd, _op, _fd, _event) \
	epoll_ctl_panic_fallback(epoll_ev, _epfd, _op, _fd, _event)
#define epoll_wait(_epfd, _events, _maxevents, _timeout) \
	epoll_wait_panic_fallback(epoll_ev, _epfd, _events, _maxevents, _timeout)

#endif /* TEST_PANIC_FALLBACK */
115 called to set the panic fallback function.
117 _PRIVATE_
bool tevent_epoll_set_panic_fallback(struct tevent_context
*ev
,
118 bool (*panic_fallback
)(struct tevent_context
*ev
,
121 struct epoll_event_context
*epoll_ev
;
123 if (ev
->additional_data
== NULL
) {
127 epoll_ev
= talloc_get_type(ev
->additional_data
,
128 struct epoll_event_context
);
129 if (epoll_ev
== NULL
) {
132 epoll_ev
->panic_fallback
= panic_fallback
;
137 called when a epoll call fails
139 static void epoll_panic(struct epoll_event_context
*epoll_ev
,
140 const char *reason
, bool replay
)
142 struct tevent_context
*ev
= epoll_ev
->ev
;
143 bool (*panic_fallback
)(struct tevent_context
*ev
, bool replay
);
145 panic_fallback
= epoll_ev
->panic_fallback
;
147 if (epoll_ev
->panic_state
!= NULL
) {
148 *epoll_ev
->panic_state
= true;
151 if (epoll_ev
->panic_force_replay
) {
155 TALLOC_FREE(ev
->additional_data
);
157 if (panic_fallback
== NULL
) {
158 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
159 "%s (%s) replay[%u] - calling abort()\n",
160 reason
, strerror(errno
), (unsigned)replay
);
164 tevent_debug(ev
, TEVENT_DEBUG_WARNING
,
165 "%s (%s) replay[%u] - calling panic_fallback\n",
166 reason
, strerror(errno
), (unsigned)replay
);
168 if (!panic_fallback(ev
, replay
)) {
169 /* Fallback failed. */
170 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
171 "%s (%s) replay[%u] - calling abort()\n",
172 reason
, strerror(errno
), (unsigned)replay
);
178 map from TEVENT_FD_* to EPOLLIN/EPOLLOUT
180 static uint32_t epoll_map_flags(uint16_t flags
)
183 if (flags
& TEVENT_FD_READ
) ret
|= (EPOLLIN
| EPOLLERR
| EPOLLHUP
);
184 if (flags
& TEVENT_FD_WRITE
) ret
|= (EPOLLOUT
| EPOLLERR
| EPOLLHUP
);
191 static int epoll_ctx_destructor(struct epoll_event_context
*epoll_ev
)
193 close(epoll_ev
->epoll_fd
);
194 epoll_ev
->epoll_fd
= -1;
201 static int epoll_init_ctx(struct epoll_event_context
*epoll_ev
)
203 epoll_ev
->epoll_fd
= epoll_create(64);
204 if (epoll_ev
->epoll_fd
== -1) {
205 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_FATAL
,
206 "Failed to create epoll handle.\n");
210 if (!ev_set_close_on_exec(epoll_ev
->epoll_fd
)) {
211 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_WARNING
,
212 "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
215 epoll_ev
->pid
= getpid();
216 talloc_set_destructor(epoll_ev
, epoll_ctx_destructor
);
221 static void epoll_update_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
);
224 reopen the epoll handle when our pid changes
225 see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for an
226 demonstration of why this is needed
228 static void epoll_check_reopen(struct epoll_event_context
*epoll_ev
)
230 struct tevent_fd
*fde
;
231 bool *caller_panic_state
= epoll_ev
->panic_state
;
232 bool panic_triggered
= false;
234 if (epoll_ev
->pid
== getpid()) {
238 close(epoll_ev
->epoll_fd
);
239 epoll_ev
->epoll_fd
= epoll_create(64);
240 if (epoll_ev
->epoll_fd
== -1) {
241 epoll_panic(epoll_ev
, "epoll_create() failed", false);
245 if (!ev_set_close_on_exec(epoll_ev
->epoll_fd
)) {
246 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_WARNING
,
247 "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
250 epoll_ev
->pid
= getpid();
251 epoll_ev
->panic_state
= &panic_triggered
;
252 for (fde
=epoll_ev
->ev
->fd_events
;fde
;fde
=fde
->next
) {
253 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
254 epoll_update_event(epoll_ev
, fde
);
256 if (panic_triggered
) {
257 if (caller_panic_state
!= NULL
) {
258 *caller_panic_state
= true;
263 epoll_ev
->panic_state
= NULL
;
267 add the epoll event to the given fd_event
269 static void epoll_add_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
271 struct epoll_event event
;
274 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
275 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
278 event
.events
= epoll_map_flags(fde
->flags
);
279 event
.data
.ptr
= fde
;
280 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_ADD
, fde
->fd
, &event
);
282 epoll_panic(epoll_ev
, "EPOLL_CTL_ADD failed", false);
286 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
287 /* only if we want to read we want to tell the event handler about errors */
288 if (fde
->flags
& TEVENT_FD_READ
) {
289 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
294 delete the epoll event for given fd_event
296 static void epoll_del_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
298 struct epoll_event event
;
301 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
302 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
305 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_DEL
, fde
->fd
, &event
);
306 if (ret
!= 0 && errno
== ENOENT
) {
308 * This can happen after a epoll_check_reopen
309 * within epoll_event_fd_destructor.
311 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_TRACE
,
312 "EPOLL_CTL_DEL ignoring ENOENT for fd[%d]\n",
315 } else if (ret
!= 0) {
316 epoll_panic(epoll_ev
, "EPOLL_CTL_DEL failed", false);
322 change the epoll event to the given fd_event
324 static void epoll_mod_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
326 struct epoll_event event
;
329 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
330 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
333 event
.events
= epoll_map_flags(fde
->flags
);
334 event
.data
.ptr
= fde
;
335 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_MOD
, fde
->fd
, &event
);
337 epoll_panic(epoll_ev
, "EPOLL_CTL_MOD failed", false);
341 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
342 /* only if we want to read we want to tell the event handler about errors */
343 if (fde
->flags
& TEVENT_FD_READ
) {
344 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
348 static void epoll_update_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
350 bool got_error
= (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR
);
351 bool want_read
= (fde
->flags
& TEVENT_FD_READ
);
352 bool want_write
= (fde
->flags
& TEVENT_FD_WRITE
);
354 /* there's already an event */
355 if (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
) {
356 if (want_read
|| (want_write
&& !got_error
)) {
357 epoll_mod_event(epoll_ev
, fde
);
361 * if we want to match the select behavior, we need to remove the epoll_event
362 * when the caller isn't interested in events.
364 * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
366 epoll_del_event(epoll_ev
, fde
);
370 /* there's no epoll_event attached to the fde */
371 if (want_read
|| (want_write
&& !got_error
)) {
372 epoll_add_event(epoll_ev
, fde
);
378 event loop handling using epoll
380 static int epoll_event_loop(struct epoll_event_context
*epoll_ev
, struct timeval
*tvalp
)
384 struct epoll_event events
[MAXEVENTS
];
389 /* it's better to trigger timed events a bit later than too early */
390 timeout
= ((tvalp
->tv_usec
+999) / 1000) + (tvalp
->tv_sec
*1000);
393 if (epoll_ev
->ev
->signal_events
&&
394 tevent_common_check_signal(epoll_ev
->ev
)) {
398 tevent_trace_point_callback(epoll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
399 ret
= epoll_wait(epoll_ev
->epoll_fd
, events
, MAXEVENTS
, timeout
);
401 tevent_trace_point_callback(epoll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
403 if (ret
== -1 && wait_errno
== EINTR
&& epoll_ev
->ev
->signal_events
) {
404 if (tevent_common_check_signal(epoll_ev
->ev
)) {
409 if (ret
== -1 && wait_errno
!= EINTR
) {
410 epoll_panic(epoll_ev
, "epoll_wait() failed", true);
414 if (ret
== 0 && tvalp
) {
415 /* we don't care about a possible delay here */
416 tevent_common_loop_timer_delay(epoll_ev
->ev
);
420 for (i
=0;i
<ret
;i
++) {
421 struct tevent_fd
*fde
= talloc_get_type(events
[i
].data
.ptr
,
426 epoll_panic(epoll_ev
, "epoll_wait() gave bad data", true);
429 if (events
[i
].events
& (EPOLLHUP
|EPOLLERR
)) {
430 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR
;
432 * if we only wait for TEVENT_FD_WRITE, we should not tell the
433 * event handler about it, and remove the epoll_event,
434 * as we only report errors when waiting for read events,
435 * to match the select() behavior
437 if (!(fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
)) {
438 epoll_update_event(epoll_ev
, fde
);
441 flags
|= TEVENT_FD_READ
;
443 if (events
[i
].events
& EPOLLIN
) flags
|= TEVENT_FD_READ
;
444 if (events
[i
].events
& EPOLLOUT
) flags
|= TEVENT_FD_WRITE
;
446 fde
->handler(epoll_ev
->ev
, fde
, flags
, fde
->private_data
);
455 create a epoll_event_context structure.
457 static int epoll_event_context_init(struct tevent_context
*ev
)
460 struct epoll_event_context
*epoll_ev
;
463 * We might be called during tevent_re_initialise()
464 * which means we need to free our old additional_data.
466 TALLOC_FREE(ev
->additional_data
);
468 epoll_ev
= talloc_zero(ev
, struct epoll_event_context
);
469 if (!epoll_ev
) return -1;
471 epoll_ev
->epoll_fd
= -1;
473 ret
= epoll_init_ctx(epoll_ev
);
475 talloc_free(epoll_ev
);
479 ev
->additional_data
= epoll_ev
;
486 static int epoll_event_fd_destructor(struct tevent_fd
*fde
)
488 struct tevent_context
*ev
= fde
->event_ctx
;
489 struct epoll_event_context
*epoll_ev
= NULL
;
490 bool panic_triggered
= false;
491 int flags
= fde
->flags
;
494 return tevent_common_fd_destructor(fde
);
497 epoll_ev
= talloc_get_type_abort(ev
->additional_data
,
498 struct epoll_event_context
);
501 * we must remove the event from the list
502 * otherwise a panic fallback handler may
503 * reuse invalid memory
505 DLIST_REMOVE(ev
->fd_events
, fde
);
507 epoll_ev
->panic_state
= &panic_triggered
;
508 epoll_check_reopen(epoll_ev
);
509 if (panic_triggered
) {
510 return tevent_common_fd_destructor(fde
);
514 epoll_update_event(epoll_ev
, fde
);
516 if (panic_triggered
) {
517 return tevent_common_fd_destructor(fde
);
519 epoll_ev
->panic_state
= NULL
;
521 return tevent_common_fd_destructor(fde
);
526 return NULL on failure (memory allocation error)
528 static struct tevent_fd
*epoll_event_add_fd(struct tevent_context
*ev
, TALLOC_CTX
*mem_ctx
,
529 int fd
, uint16_t flags
,
530 tevent_fd_handler_t handler
,
532 const char *handler_name
,
533 const char *location
)
535 struct epoll_event_context
*epoll_ev
= talloc_get_type(ev
->additional_data
,
536 struct epoll_event_context
);
537 struct tevent_fd
*fde
;
538 bool panic_triggered
= false;
540 fde
= tevent_common_add_fd(ev
, mem_ctx
, fd
, flags
,
541 handler
, private_data
,
542 handler_name
, location
);
543 if (!fde
) return NULL
;
545 talloc_set_destructor(fde
, epoll_event_fd_destructor
);
547 epoll_ev
->panic_state
= &panic_triggered
;
548 epoll_check_reopen(epoll_ev
);
549 if (panic_triggered
) {
552 epoll_ev
->panic_state
= NULL
;
554 epoll_update_event(epoll_ev
, fde
);
560 set the fd event flags
562 static void epoll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
564 struct tevent_context
*ev
;
565 struct epoll_event_context
*epoll_ev
;
566 bool panic_triggered
= false;
568 if (fde
->flags
== flags
) return;
571 epoll_ev
= talloc_get_type(ev
->additional_data
, struct epoll_event_context
);
575 epoll_ev
->panic_state
= &panic_triggered
;
576 epoll_check_reopen(epoll_ev
);
577 if (panic_triggered
) {
580 epoll_ev
->panic_state
= NULL
;
582 epoll_update_event(epoll_ev
, fde
);
586 do a single event loop using the events defined in ev
588 static int epoll_event_loop_once(struct tevent_context
*ev
, const char *location
)
590 struct epoll_event_context
*epoll_ev
= talloc_get_type(ev
->additional_data
,
591 struct epoll_event_context
);
593 bool panic_triggered
= false;
595 if (ev
->signal_events
&&
596 tevent_common_check_signal(ev
)) {
600 if (ev
->immediate_events
&&
601 tevent_common_loop_immediate(ev
)) {
605 tval
= tevent_common_loop_timer_delay(ev
);
606 if (tevent_timeval_is_zero(&tval
)) {
610 epoll_ev
->panic_state
= &panic_triggered
;
611 epoll_ev
->panic_force_replay
= true;
612 epoll_check_reopen(epoll_ev
);
613 if (panic_triggered
) {
617 epoll_ev
->panic_force_replay
= false;
618 epoll_ev
->panic_state
= NULL
;
620 return epoll_event_loop(epoll_ev
, &tval
);
623 static const struct tevent_ops epoll_event_ops
= {
624 .context_init
= epoll_event_context_init
,
625 .add_fd
= epoll_event_add_fd
,
626 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
627 .get_fd_flags
= tevent_common_fd_get_flags
,
628 .set_fd_flags
= epoll_event_set_fd_flags
,
629 .add_timer
= tevent_common_add_timer
,
630 .schedule_immediate
= tevent_common_schedule_immediate
,
631 .add_signal
= tevent_common_add_signal
,
632 .loop_once
= epoll_event_loop_once
,
633 .loop_wait
= tevent_common_loop_wait
,
636 _PRIVATE_
bool tevent_epoll_init(void)
638 return tevent_register_backend("epoll", &epoll_event_ops
);