2 Unix SMB/CIFS implementation.
4 main select loop and event handling - epoll implementation
6 Copyright (C) Andrew Tridgell 2003-2005
7 Copyright (C) Stefan Metzmacher 2005-2013
8 Copyright (C) Jeremy Allison 2013
10 ** NOTE! The following LGPL license applies to the tevent
11 ** library. This does NOT imply that all of Samba is released
14 This library is free software; you can redistribute it and/or
15 modify it under the terms of the GNU Lesser General Public
16 License as published by the Free Software Foundation; either
17 version 3 of the License, or (at your option) any later version.
19 This library is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 Lesser General Public License for more details.
24 You should have received a copy of the GNU Lesser General Public
25 License along with this library; if not, see <http://www.gnu.org/licenses/>.
29 #include "system/filesys.h"
30 #include "system/select.h"
32 #include "tevent_internal.h"
33 #include "tevent_util.h"
struct epoll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/* when using epoll this is the handle from epoll_create */
	int epoll_fd;

	/* pid of the process which created epoll_fd; used to detect fork() */
	pid_t pid;

	/* when true, epoll_panic() always acts as if a replay was requested */
	bool panic_force_replay;
	/* out-parameter for callers that need to know a panic fired */
	bool *panic_state;
	/* optional recovery hook called instead of abort() on epoll failure */
	bool (*panic_fallback)(struct tevent_context *ev, bool replay);
};

/* per-fde state bits kept in fde->additional_flags */
#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT	(1<<0)
#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR	(1<<1)
#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR	(1<<2)
#define EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX	(1<<3)
#ifdef TEST_PANIC_FALLBACK

/*
 * Test wrappers: when a panic_fallback handler is registered, randomly
 * fail ~50% of epoll syscalls so the fallback path gets exercised.
 * The macros below route the real call sites through these wrappers.
 */

static int epoll_create_panic_fallback(struct epoll_event_context *epoll_ev,
				       int size)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_create(size);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_create(size);
}

static int epoll_ctl_panic_fallback(struct epoll_event_context *epoll_ev,
				    int epfd, int op, int fd,
				    struct epoll_event *event)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_ctl(epfd, op, fd, event);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_ctl(epfd, op, fd, event);
}

static int epoll_wait_panic_fallback(struct epoll_event_context *epoll_ev,
				     int epfd,
				     struct epoll_event *events,
				     int maxevents,
				     int timeout)
{
	if (epoll_ev->panic_fallback == NULL) {
		return epoll_wait(epfd, events, maxevents, timeout);
	}

	/* 50% of the time, fail... */
	if ((random() % 2) == 0) {
		errno = EINVAL;
		return -1;
	}

	return epoll_wait(epfd, events, maxevents, timeout);
}

#define epoll_create(_size) \
	epoll_create_panic_fallback(epoll_ev, _size)
#define epoll_ctl(_epfd, _op, _fd, _event) \
	epoll_ctl_panic_fallback(epoll_ev,_epfd, _op, _fd, _event)
#define epoll_wait(_epfd, _events, _maxevents, _timeout) \
	epoll_wait_panic_fallback(epoll_ev, _epfd, _events, _maxevents, _timeout)

#endif /* TEST_PANIC_FALLBACK */
117 called to set the panic fallback function.
119 _PRIVATE_
void tevent_epoll_set_panic_fallback(struct tevent_context
*ev
,
120 bool (*panic_fallback
)(struct tevent_context
*ev
,
123 struct epoll_event_context
*epoll_ev
=
124 talloc_get_type_abort(ev
->additional_data
,
125 struct epoll_event_context
);
127 epoll_ev
->panic_fallback
= panic_fallback
;
131 called when a epoll call fails
133 static void epoll_panic(struct epoll_event_context
*epoll_ev
,
134 const char *reason
, bool replay
)
136 struct tevent_context
*ev
= epoll_ev
->ev
;
137 bool (*panic_fallback
)(struct tevent_context
*ev
, bool replay
);
139 panic_fallback
= epoll_ev
->panic_fallback
;
141 if (epoll_ev
->panic_state
!= NULL
) {
142 *epoll_ev
->panic_state
= true;
145 if (epoll_ev
->panic_force_replay
) {
149 TALLOC_FREE(ev
->additional_data
);
151 if (panic_fallback
== NULL
) {
152 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
153 "%s (%s) replay[%u] - calling abort()\n",
154 reason
, strerror(errno
), (unsigned)replay
);
158 tevent_debug(ev
, TEVENT_DEBUG_ERROR
,
159 "%s (%s) replay[%u] - calling panic_fallback\n",
160 reason
, strerror(errno
), (unsigned)replay
);
162 if (!panic_fallback(ev
, replay
)) {
163 /* Fallback failed. */
164 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
165 "%s (%s) replay[%u] - calling abort()\n",
166 reason
, strerror(errno
), (unsigned)replay
);
172 map from TEVENT_FD_* to EPOLLIN/EPOLLOUT
174 static uint32_t epoll_map_flags(uint16_t flags
)
177 if (flags
& TEVENT_FD_READ
) ret
|= (EPOLLIN
| EPOLLERR
| EPOLLHUP
);
178 if (flags
& TEVENT_FD_WRITE
) ret
|= (EPOLLOUT
| EPOLLERR
| EPOLLHUP
);
185 static int epoll_ctx_destructor(struct epoll_event_context
*epoll_ev
)
187 close(epoll_ev
->epoll_fd
);
188 epoll_ev
->epoll_fd
= -1;
195 static int epoll_init_ctx(struct epoll_event_context
*epoll_ev
)
197 epoll_ev
->epoll_fd
= epoll_create(64);
198 if (epoll_ev
->epoll_fd
== -1) {
199 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_FATAL
,
200 "Failed to create epoll handle.\n");
204 if (!ev_set_close_on_exec(epoll_ev
->epoll_fd
)) {
205 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_WARNING
,
206 "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
209 epoll_ev
->pid
= getpid();
210 talloc_set_destructor(epoll_ev
, epoll_ctx_destructor
);
215 static void epoll_update_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
);
218 reopen the epoll handle when our pid changes
219 see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for an
220 demonstration of why this is needed
222 static void epoll_check_reopen(struct epoll_event_context
*epoll_ev
)
224 struct tevent_fd
*fde
;
225 bool *caller_panic_state
= epoll_ev
->panic_state
;
226 bool panic_triggered
= false;
228 if (epoll_ev
->pid
== getpid()) {
232 close(epoll_ev
->epoll_fd
);
233 epoll_ev
->epoll_fd
= epoll_create(64);
234 if (epoll_ev
->epoll_fd
== -1) {
235 epoll_panic(epoll_ev
, "epoll_create() failed", false);
239 if (!ev_set_close_on_exec(epoll_ev
->epoll_fd
)) {
240 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_WARNING
,
241 "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
244 epoll_ev
->pid
= getpid();
245 epoll_ev
->panic_state
= &panic_triggered
;
246 for (fde
=epoll_ev
->ev
->fd_events
;fde
;fde
=fde
->next
) {
247 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
248 epoll_update_event(epoll_ev
, fde
);
250 if (panic_triggered
) {
251 if (caller_panic_state
!= NULL
) {
252 *caller_panic_state
= true;
257 epoll_ev
->panic_state
= NULL
;
261 epoll cannot add the same file descriptor twice, once
262 with read, once with write which is allowed by the
263 tevent backend. Multiplex the existing fde, flag it
264 as such so we can search for the correct fde on
268 static int epoll_add_multiplex_fd(struct epoll_event_context
*epoll_ev
,
269 struct tevent_fd
*add_fde
)
271 struct epoll_event event
;
272 struct tevent_fd
*mpx_fde
;
275 /* Find the existing fde that caused the EEXIST error. */
276 for (mpx_fde
= epoll_ev
->ev
->fd_events
; mpx_fde
; mpx_fde
= mpx_fde
->next
) {
277 if (mpx_fde
->fd
!= add_fde
->fd
) {
281 if (mpx_fde
== add_fde
) {
287 if (mpx_fde
== NULL
) {
288 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_FATAL
,
289 "can't find multiplex fde for fd[%d]",
294 if (mpx_fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
) {
295 /* Logic error. Can't have more than 2 multiplexed fde's. */
296 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_FATAL
,
297 "multiplex fde for fd[%d] is already multiplexed\n",
303 * The multiplex fde must have the same fd, and also
304 * already have an epoll event attached.
306 if (!(mpx_fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
)) {
307 /* Logic error. Can't have more than 2 multiplexed fde's. */
308 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_FATAL
,
309 "multiplex fde for fd[%d] has no event\n",
314 /* Modify the mpx_fde to add in the new flags. */
316 event
.events
= epoll_map_flags(mpx_fde
->flags
);
317 event
.events
|= epoll_map_flags(add_fde
->flags
);
318 event
.data
.ptr
= mpx_fde
;
319 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_MOD
, mpx_fde
->fd
, &event
);
320 if (ret
!= 0 && errno
== EBADF
) {
321 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_ERROR
,
322 "EPOLL_CTL_MOD EBADF for "
323 "add_fde[%p] mpx_fde[%p] fd[%d] - disabling\n",
324 add_fde
, mpx_fde
, add_fde
->fd
);
325 DLIST_REMOVE(epoll_ev
->ev
->fd_events
, mpx_fde
);
326 mpx_fde
->event_ctx
= NULL
;
327 DLIST_REMOVE(epoll_ev
->ev
->fd_events
, add_fde
);
328 add_fde
->event_ctx
= NULL
;
330 } else if (ret
!= 0) {
335 * Make each fde->additional_data pointers point at each other
336 * so we can look them up from each other. They are now paired.
338 mpx_fde
->additional_data
= (struct tevent_fd
*)add_fde
;
339 add_fde
->additional_data
= (struct tevent_fd
*)mpx_fde
;
341 /* Now flag both fde's as being multiplexed. */
342 mpx_fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
;
343 add_fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
;
345 /* we need to keep the GOT_ERROR flag */
346 if (mpx_fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR
) {
347 add_fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR
;
354 add the epoll event to the given fd_event
356 static void epoll_add_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
358 struct epoll_event event
;
360 struct tevent_fd
*mpx_fde
= NULL
;
362 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
363 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
365 if (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
) {
367 * This is a multiplexed fde, we need to include both
368 * flags in the modified event.
370 mpx_fde
= talloc_get_type_abort(fde
->additional_data
,
373 mpx_fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
374 mpx_fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
378 event
.events
= epoll_map_flags(fde
->flags
);
379 if (mpx_fde
!= NULL
) {
380 event
.events
|= epoll_map_flags(mpx_fde
->flags
);
382 event
.data
.ptr
= fde
;
383 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_ADD
, fde
->fd
, &event
);
384 if (ret
!= 0 && errno
== EBADF
) {
385 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_ERROR
,
386 "EPOLL_CTL_ADD EBADF for "
387 "fde[%p] mpx_fde[%p] fd[%d] - disabling\n",
388 fde
, mpx_fde
, fde
->fd
);
389 DLIST_REMOVE(epoll_ev
->ev
->fd_events
, fde
);
390 fde
->event_ctx
= NULL
;
391 if (mpx_fde
!= NULL
) {
392 DLIST_REMOVE(epoll_ev
->ev
->fd_events
, mpx_fde
);
393 mpx_fde
->event_ctx
= NULL
;
396 } else if (ret
!= 0 && errno
== EEXIST
&& mpx_fde
== NULL
) {
397 ret
= epoll_add_multiplex_fd(epoll_ev
, fde
);
399 epoll_panic(epoll_ev
, "epoll_add_multiplex_fd failed",
403 } else if (ret
!= 0) {
404 epoll_panic(epoll_ev
, "EPOLL_CTL_ADD failed", false);
408 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
409 /* only if we want to read we want to tell the event handler about errors */
410 if (fde
->flags
& TEVENT_FD_READ
) {
411 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
414 if (mpx_fde
== NULL
) {
418 mpx_fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
419 /* only if we want to read we want to tell the event handler about errors */
420 if (mpx_fde
->flags
& TEVENT_FD_READ
) {
421 mpx_fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
426 delete the epoll event for given fd_event
428 static void epoll_del_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
430 struct epoll_event event
;
432 struct tevent_fd
*mpx_fde
= NULL
;
434 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
435 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
437 if (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
) {
439 * This is a multiplexed fde, we need to modify both events.
441 mpx_fde
= talloc_get_type_abort(fde
->additional_data
,
444 mpx_fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
445 mpx_fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
449 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_DEL
, fde
->fd
, &event
);
450 if (ret
!= 0 && errno
== ENOENT
) {
452 * This can happen after a epoll_check_reopen
453 * within epoll_event_fd_destructor.
455 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_TRACE
,
456 "EPOLL_CTL_DEL ignoring ENOENT for fd[%d]\n",
459 } else if (ret
!= 0 && errno
== EBADF
) {
460 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_WARNING
,
461 "EPOLL_CTL_DEL EBADF for "
462 "fde[%p] mpx_fde[%p] fd[%d] - disabling\n",
463 fde
, mpx_fde
, fde
->fd
);
464 DLIST_REMOVE(epoll_ev
->ev
->fd_events
, fde
);
465 fde
->event_ctx
= NULL
;
466 if (mpx_fde
!= NULL
) {
467 DLIST_REMOVE(epoll_ev
->ev
->fd_events
, mpx_fde
);
468 mpx_fde
->event_ctx
= NULL
;
471 } else if (ret
!= 0) {
472 epoll_panic(epoll_ev
, "EPOLL_CTL_DEL failed", false);
478 change the epoll event to the given fd_event
480 static void epoll_mod_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
482 struct tevent_fd
*mpx_fde
= NULL
;
483 struct epoll_event event
;
486 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
487 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
489 if (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
) {
491 * This is a multiplexed fde, we need to include both
492 * flags in the modified event.
494 mpx_fde
= talloc_get_type_abort(fde
->additional_data
,
497 mpx_fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
498 mpx_fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
502 event
.events
= epoll_map_flags(fde
->flags
);
503 if (mpx_fde
!= NULL
) {
504 event
.events
|= epoll_map_flags(mpx_fde
->flags
);
506 event
.data
.ptr
= fde
;
507 ret
= epoll_ctl(epoll_ev
->epoll_fd
, EPOLL_CTL_MOD
, fde
->fd
, &event
);
508 if (ret
!= 0 && errno
== EBADF
) {
509 tevent_debug(epoll_ev
->ev
, TEVENT_DEBUG_ERROR
,
510 "EPOLL_CTL_MOD EBADF for "
511 "fde[%p] mpx_fde[%p] fd[%d] - disabling\n",
512 fde
, mpx_fde
, fde
->fd
);
513 DLIST_REMOVE(epoll_ev
->ev
->fd_events
, fde
);
514 fde
->event_ctx
= NULL
;
515 if (mpx_fde
!= NULL
) {
516 DLIST_REMOVE(epoll_ev
->ev
->fd_events
, mpx_fde
);
517 mpx_fde
->event_ctx
= NULL
;
520 } else if (ret
!= 0) {
521 epoll_panic(epoll_ev
, "EPOLL_CTL_MOD failed", false);
525 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
526 /* only if we want to read we want to tell the event handler about errors */
527 if (fde
->flags
& TEVENT_FD_READ
) {
528 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
531 if (mpx_fde
== NULL
) {
535 mpx_fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
536 /* only if we want to read we want to tell the event handler about errors */
537 if (mpx_fde
->flags
& TEVENT_FD_READ
) {
538 mpx_fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
;
542 static void epoll_update_event(struct epoll_event_context
*epoll_ev
, struct tevent_fd
*fde
)
544 bool got_error
= (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR
);
545 bool want_read
= (fde
->flags
& TEVENT_FD_READ
);
546 bool want_write
= (fde
->flags
& TEVENT_FD_WRITE
);
547 struct tevent_fd
*mpx_fde
= NULL
;
549 if (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
) {
551 * work out what the multiplexed fde wants.
553 mpx_fde
= talloc_get_type_abort(fde
->additional_data
,
556 if (mpx_fde
->flags
& TEVENT_FD_READ
) {
560 if (mpx_fde
->flags
& TEVENT_FD_WRITE
) {
565 /* there's already an event */
566 if (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
) {
567 if (want_read
|| (want_write
&& !got_error
)) {
568 epoll_mod_event(epoll_ev
, fde
);
572 * if we want to match the select behavior, we need to remove the epoll_event
573 * when the caller isn't interested in events.
575 * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
577 epoll_del_event(epoll_ev
, fde
);
581 /* there's no epoll_event attached to the fde */
582 if (want_read
|| (want_write
&& !got_error
)) {
583 epoll_add_event(epoll_ev
, fde
);
589 Cope with epoll returning EPOLLHUP|EPOLLERR on an event.
590 Return true if there's nothing else to do, false if
591 this event needs further handling.
593 static bool epoll_handle_hup_or_err(struct epoll_event_context
*epoll_ev
,
594 struct tevent_fd
*fde
)
597 /* Nothing to do if no event. */
601 fde
->additional_flags
|= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR
;
603 * if we only wait for TEVENT_FD_WRITE, we should not tell the
604 * event handler about it, and remove the epoll_event,
605 * as we only report errors when waiting for read events,
606 * to match the select() behavior
608 if (!(fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR
)) {
610 * Do the same as the poll backend and
611 * remove the writeable flag.
613 fde
->flags
&= ~TEVENT_FD_WRITE
;
616 /* This has TEVENT_FD_READ set, we're not finished. */
621 event loop handling using epoll
623 static int epoll_event_loop(struct epoll_event_context
*epoll_ev
, struct timeval
*tvalp
)
627 struct epoll_event events
[MAXEVENTS
];
632 /* it's better to trigger timed events a bit later than too early */
633 timeout
= ((tvalp
->tv_usec
+999) / 1000) + (tvalp
->tv_sec
*1000);
636 if (epoll_ev
->ev
->signal_events
&&
637 tevent_common_check_signal(epoll_ev
->ev
)) {
641 tevent_trace_point_callback(epoll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
642 ret
= epoll_wait(epoll_ev
->epoll_fd
, events
, MAXEVENTS
, timeout
);
644 tevent_trace_point_callback(epoll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
646 if (ret
== -1 && wait_errno
== EINTR
&& epoll_ev
->ev
->signal_events
) {
647 if (tevent_common_check_signal(epoll_ev
->ev
)) {
652 if (ret
== -1 && wait_errno
!= EINTR
) {
653 epoll_panic(epoll_ev
, "epoll_wait() failed", true);
657 if (ret
== 0 && tvalp
) {
658 /* we don't care about a possible delay here */
659 tevent_common_loop_timer_delay(epoll_ev
->ev
);
663 for (i
=0;i
<ret
;i
++) {
664 struct tevent_fd
*fde
= talloc_get_type(events
[i
].data
.ptr
,
667 struct tevent_fd
*mpx_fde
= NULL
;
670 epoll_panic(epoll_ev
, "epoll_wait() gave bad data", true);
673 if (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
) {
675 * Save off the multiplexed event in case we need
676 * to use it to call the handler function.
678 mpx_fde
= talloc_get_type_abort(fde
->additional_data
,
681 if (events
[i
].events
& (EPOLLHUP
|EPOLLERR
)) {
682 bool handled_fde
= epoll_handle_hup_or_err(epoll_ev
, fde
);
683 bool handled_mpx
= epoll_handle_hup_or_err(epoll_ev
, mpx_fde
);
685 if (handled_fde
&& handled_mpx
) {
686 epoll_update_event(epoll_ev
, fde
);
692 * If the mpx event was the one that needs
693 * further handling, it's the TEVENT_FD_READ
694 * event so switch over and call that handler.
699 flags
|= TEVENT_FD_READ
;
701 if (events
[i
].events
& EPOLLIN
) flags
|= TEVENT_FD_READ
;
702 if (events
[i
].events
& EPOLLOUT
) flags
|= TEVENT_FD_WRITE
;
704 if (flags
& TEVENT_FD_WRITE
) {
705 if (fde
->flags
& TEVENT_FD_WRITE
) {
708 if (mpx_fde
&& mpx_fde
->flags
& TEVENT_FD_WRITE
) {
715 /* Ensure we got the right fde. */
716 if ((flags
& fde
->flags
) == 0) {
723 * make sure we only pass the flags
724 * the handler is expecting.
728 fde
->handler(epoll_ev
->ev
, fde
, flags
, fde
->private_data
);
737 create a epoll_event_context structure.
739 static int epoll_event_context_init(struct tevent_context
*ev
)
742 struct epoll_event_context
*epoll_ev
;
745 * We might be called during tevent_re_initialise()
746 * which means we need to free our old additional_data.
748 TALLOC_FREE(ev
->additional_data
);
750 epoll_ev
= talloc_zero(ev
, struct epoll_event_context
);
751 if (!epoll_ev
) return -1;
753 epoll_ev
->epoll_fd
= -1;
755 ret
= epoll_init_ctx(epoll_ev
);
757 talloc_free(epoll_ev
);
761 ev
->additional_data
= epoll_ev
;
768 static int epoll_event_fd_destructor(struct tevent_fd
*fde
)
770 struct tevent_context
*ev
= fde
->event_ctx
;
771 struct epoll_event_context
*epoll_ev
= NULL
;
772 bool panic_triggered
= false;
773 struct tevent_fd
*mpx_fde
= NULL
;
774 int flags
= fde
->flags
;
777 return tevent_common_fd_destructor(fde
);
780 epoll_ev
= talloc_get_type_abort(ev
->additional_data
,
781 struct epoll_event_context
);
784 * we must remove the event from the list
785 * otherwise a panic fallback handler may
786 * reuse invalid memory
788 DLIST_REMOVE(ev
->fd_events
, fde
);
790 if (fde
->additional_flags
& EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
) {
791 mpx_fde
= talloc_get_type_abort(fde
->additional_data
,
794 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
;
795 mpx_fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_MPX
;
797 fde
->additional_data
= NULL
;
798 mpx_fde
->additional_data
= NULL
;
800 fde
->additional_flags
&= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT
;
803 epoll_ev
->panic_state
= &panic_triggered
;
804 epoll_check_reopen(epoll_ev
);
805 if (panic_triggered
) {
806 return tevent_common_fd_destructor(fde
);
809 if (mpx_fde
!= NULL
) {
810 epoll_update_event(epoll_ev
, mpx_fde
);
811 if (panic_triggered
) {
812 return tevent_common_fd_destructor(fde
);
817 epoll_update_event(epoll_ev
, fde
);
819 if (panic_triggered
) {
820 return tevent_common_fd_destructor(fde
);
822 epoll_ev
->panic_state
= NULL
;
824 return tevent_common_fd_destructor(fde
);
829 return NULL on failure (memory allocation error)
831 static struct tevent_fd
*epoll_event_add_fd(struct tevent_context
*ev
, TALLOC_CTX
*mem_ctx
,
832 int fd
, uint16_t flags
,
833 tevent_fd_handler_t handler
,
835 const char *handler_name
,
836 const char *location
)
838 struct epoll_event_context
*epoll_ev
=
839 talloc_get_type_abort(ev
->additional_data
,
840 struct epoll_event_context
);
841 struct tevent_fd
*fde
;
842 bool panic_triggered
= false;
844 fde
= tevent_common_add_fd(ev
, mem_ctx
, fd
, flags
,
845 handler
, private_data
,
846 handler_name
, location
);
847 if (!fde
) return NULL
;
849 talloc_set_destructor(fde
, epoll_event_fd_destructor
);
851 epoll_ev
->panic_state
= &panic_triggered
;
852 epoll_check_reopen(epoll_ev
);
853 if (panic_triggered
) {
856 epoll_ev
->panic_state
= NULL
;
858 epoll_update_event(epoll_ev
, fde
);
864 set the fd event flags
866 static void epoll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
868 struct tevent_context
*ev
;
869 struct epoll_event_context
*epoll_ev
;
870 bool panic_triggered
= false;
872 if (fde
->flags
== flags
) return;
875 epoll_ev
= talloc_get_type_abort(ev
->additional_data
,
876 struct epoll_event_context
);
880 epoll_ev
->panic_state
= &panic_triggered
;
881 epoll_check_reopen(epoll_ev
);
882 if (panic_triggered
) {
885 epoll_ev
->panic_state
= NULL
;
887 epoll_update_event(epoll_ev
, fde
);
891 do a single event loop using the events defined in ev
893 static int epoll_event_loop_once(struct tevent_context
*ev
, const char *location
)
895 struct epoll_event_context
*epoll_ev
=
896 talloc_get_type_abort(ev
->additional_data
,
897 struct epoll_event_context
);
899 bool panic_triggered
= false;
901 if (ev
->signal_events
&&
902 tevent_common_check_signal(ev
)) {
906 if (ev
->threaded_contexts
!= NULL
) {
907 tevent_common_threaded_activate_immediate(ev
);
910 if (ev
->immediate_events
&&
911 tevent_common_loop_immediate(ev
)) {
915 tval
= tevent_common_loop_timer_delay(ev
);
916 if (tevent_timeval_is_zero(&tval
)) {
920 epoll_ev
->panic_state
= &panic_triggered
;
921 epoll_ev
->panic_force_replay
= true;
922 epoll_check_reopen(epoll_ev
);
923 if (panic_triggered
) {
927 epoll_ev
->panic_force_replay
= false;
928 epoll_ev
->panic_state
= NULL
;
930 return epoll_event_loop(epoll_ev
, &tval
);
933 static const struct tevent_ops epoll_event_ops
= {
934 .context_init
= epoll_event_context_init
,
935 .add_fd
= epoll_event_add_fd
,
936 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
937 .get_fd_flags
= tevent_common_fd_get_flags
,
938 .set_fd_flags
= epoll_event_set_fd_flags
,
939 .add_timer
= tevent_common_add_timer_v2
,
940 .schedule_immediate
= tevent_common_schedule_immediate
,
941 .add_signal
= tevent_common_add_signal
,
942 .loop_once
= epoll_event_loop_once
,
943 .loop_wait
= tevent_common_loop_wait
,
946 _PRIVATE_
bool tevent_epoll_init(void)
948 return tevent_register_backend("epoll", &epoll_event_ops
);