/*
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell 2003
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
/*
  PLEASE READ THIS BEFORE MODIFYING!

  This module is a general abstraction for the main select loop and
  event handling. Do not ever put any localised hacks in here; instead
  register one of the possible event types and implement that event
  somewhere else.

  There are 2 types of event handling that are handled in this module:

  1) a file descriptor becoming readable or writeable. This is mostly
     used for network sockets, but can be used for any type of file
     descriptor. You may only register one handler for each file
     descriptor/io combination or you will get unpredictable results
     (this means that you can have a handler for read events, and a
     separate handler for write events, but not two handlers that are
     both handling read events)

  2) a timed event. You can register an event that happens at a
     specific time. You can register as many of these as you
     like. They are single shot - add a new timed event in the event
     handler to get another event.

  To set up a set of events you first need to create an event_context
  structure using the function tevent_context_init(). This returns a
  'struct tevent_context' that you use in all subsequent calls.

  After that you can add/remove events that you are interested in
  using tevent_add_*() and talloc_free().

  Finally, you call tevent_loop_once() to block waiting for one of the
  events to occur, or tevent_loop_wait() which will loop forever.
  A short usage sketch illustrating this flow follows this comment.
*/
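/*
  A minimal, self-contained usage sketch of the flow described above.
  This is hypothetical caller code, not part of this library: the
  handler name, the bool flag and the 2 second timeout are invented for
  illustration only, and the block is compile-guarded.
*/
#if 0
#include <stdio.h>
#include <stdbool.h>
#include <talloc.h>
#include <tevent.h>

/* single-shot timer handler: flag completion so the loop below can stop */
static void example_timer_handler(struct tevent_context *ev,
                                  struct tevent_timer *te,
                                  struct timeval current_time,
                                  void *private_data)
{
        bool *done = (bool *)private_data;

        printf("timer fired\n");
        *done = true;
}

int main(void)
{
        TALLOC_CTX *mem_ctx = talloc_new(NULL);
        struct tevent_context *ev;
        struct tevent_timer *te;
        bool done = false;

        /* 1. create the event context */
        ev = tevent_context_init(mem_ctx);
        if (ev == NULL) {
                return 1;
        }

        /* 2. register the events you are interested in */
        te = tevent_add_timer(ev, ev, tevent_timeval_current_ofs(2, 0),
                              example_timer_handler, &done);
        if (te == NULL) {
                return 1;
        }

        /* 3. run the loop until the handler signals completion */
        while (!done) {
                tevent_loop_once(ev);
        }

        talloc_free(mem_ctx); /* frees the context and any remaining events */
        return 0;
}
#endif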
#include "replace.h"
#include "system/filesys.h"
#ifdef HAVE_PTHREAD
#include "system/threads.h"
#endif
#define TEVENT_DEPRECATED 1
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
#ifdef HAVE_EVENTFD
#include <sys/eventfd.h>
#endif

struct tevent_ops_list {
        struct tevent_ops_list *next, *prev;
        const char *name;
        const struct tevent_ops *ops;
};

/* list of registered event backends */
static struct tevent_ops_list *tevent_backends = NULL;
static char *tevent_default_backend = NULL;

/*
  register an events backend
*/
bool tevent_register_backend(const char *name, const struct tevent_ops *ops)
{
        struct tevent_ops_list *e;

        for (e = tevent_backends; e != NULL; e = e->next) {
                if (0 == strcmp(e->name, name)) {
                        /* already registered, skip it */
                        return true;
                }
        }

        e = talloc(NULL, struct tevent_ops_list);
        if (e == NULL) return false;

        e->name = name;
        e->ops = ops;
        DLIST_ADD(tevent_backends, e);

        return true;
}
/*
  set the default event backend
*/
void tevent_set_default_backend(const char *backend)
{
        talloc_free(tevent_default_backend);
        tevent_default_backend = talloc_strdup(NULL, backend);
}

/*
  initialise backends if not already done
*/
static void tevent_backend_init(void)
{
        static bool done;

        if (done) {
                return;
        }

        done = true;

        tevent_poll_init();
        tevent_poll_mt_init();
#if defined(HAVE_EPOLL)
        tevent_epoll_init();
#endif

        tevent_standard_init();
}

const struct tevent_ops *tevent_find_ops_byname(const char *name)
{
        struct tevent_ops_list *e;

        tevent_backend_init();

        if (name == NULL) {
                name = tevent_default_backend;
        }
        if (name == NULL) {
                name = "standard";
        }

        for (e = tevent_backends; e != NULL; e = e->next) {
                if (0 == strcmp(e->name, name)) {
                        return e->ops;
                }
        }

        return NULL;
}

/*
  list available backends
*/
const char **tevent_backend_list(TALLOC_CTX *mem_ctx)
{
        const char **list = NULL;
        struct tevent_ops_list *e;
        size_t idx = 0;

        tevent_backend_init();

        for (e=tevent_backends;e;e=e->next) {
                idx += 1;
        }

        list = talloc_zero_array(mem_ctx, const char *, idx+1);
        if (list == NULL) {
                return NULL;
        }

        idx = 0;
        for (e=tevent_backends;e;e=e->next) {
                list[idx] = talloc_strdup(list, e->name);
                if (list[idx] == NULL) {
                        TALLOC_FREE(list);
                        return NULL;
                }
                idx += 1;
        }

        return list;
}
static void tevent_common_wakeup_fini(struct tevent_context *ev);

#ifdef HAVE_PTHREAD

static pthread_mutex_t tevent_contexts_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct tevent_context *tevent_contexts = NULL;
static pthread_once_t tevent_atfork_initialized = PTHREAD_ONCE_INIT;
static pid_t tevent_cached_global_pid = 0;

static void tevent_atfork_prepare(void)
{
        struct tevent_context *ev;
        int ret;

        ret = pthread_mutex_lock(&tevent_contexts_mutex);
        if (ret != 0) {
                abort();
        }

        for (ev = tevent_contexts; ev != NULL; ev = ev->next) {
                struct tevent_threaded_context *tctx;

                for (tctx = ev->threaded_contexts; tctx != NULL;
                     tctx = tctx->next) {
                        ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
                        if (ret != 0) {
                                tevent_abort(ev, "pthread_mutex_lock failed");
                        }
                }

                ret = pthread_mutex_lock(&ev->scheduled_mutex);
                if (ret != 0) {
                        tevent_abort(ev, "pthread_mutex_lock failed");
                }
        }
}

static void tevent_atfork_parent(void)
{
        struct tevent_context *ev;
        int ret;

        for (ev = DLIST_TAIL(tevent_contexts); ev != NULL;
             ev = DLIST_PREV(ev)) {
                struct tevent_threaded_context *tctx;

                ret = pthread_mutex_unlock(&ev->scheduled_mutex);
                if (ret != 0) {
                        tevent_abort(ev, "pthread_mutex_unlock failed");
                }

                for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
                     tctx = DLIST_PREV(tctx)) {
                        ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
                        if (ret != 0) {
                                tevent_abort(
                                        ev, "pthread_mutex_unlock failed");
                        }
                }
        }

        ret = pthread_mutex_unlock(&tevent_contexts_mutex);
        if (ret != 0) {
                abort();
        }
}

static void tevent_atfork_child(void)
{
        struct tevent_context *ev;
        int ret;

        tevent_cached_global_pid = getpid();

        for (ev = DLIST_TAIL(tevent_contexts); ev != NULL;
             ev = DLIST_PREV(ev)) {
                struct tevent_threaded_context *tctx;

                for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
                     tctx = DLIST_PREV(tctx)) {
                        tctx->event_ctx = NULL;

                        ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
                        if (ret != 0) {
                                tevent_abort(
                                        ev, "pthread_mutex_unlock failed");
                        }
                }

                ev->threaded_contexts = NULL;

                ret = pthread_mutex_unlock(&ev->scheduled_mutex);
                if (ret != 0) {
                        tevent_abort(ev, "pthread_mutex_unlock failed");
                }
        }

        ret = pthread_mutex_unlock(&tevent_contexts_mutex);
        if (ret != 0) {
                abort();
        }
}
static void tevent_prep_atfork(void)
{
        int ret;

        ret = pthread_atfork(tevent_atfork_prepare,
                             tevent_atfork_parent,
                             tevent_atfork_child);
        if (ret != 0) {
                abort();
        }

        tevent_cached_global_pid = getpid();
}

#endif

static int tevent_init_globals(void)
{
#ifdef HAVE_PTHREAD
        int ret;

        ret = pthread_once(&tevent_atfork_initialized, tevent_prep_atfork);
        if (ret != 0) {
                return ret;
        }
#endif

        return 0;
}

_PUBLIC_ pid_t tevent_cached_getpid(void)
{
#ifdef HAVE_PTHREAD
        tevent_init_globals();
#ifdef TEVENT_VERIFY_CACHED_GETPID
        if (tevent_cached_global_pid != getpid()) {
                tevent_abort(NULL, "tevent_cached_global_pid invalid");
        }
#endif
        if (tevent_cached_global_pid != 0) {
                return tevent_cached_global_pid;
        }
#endif
        return getpid();
}
int tevent_common_context_destructor(struct tevent_context *ev)
{
        struct tevent_fd *fd, *fn;
        struct tevent_timer *te, *tn;
        struct tevent_immediate *ie, *in;
        struct tevent_signal *se, *sn;
        struct tevent_wrapper_glue *gl, *gn;
#ifdef HAVE_PTHREAD
        int ret;
#endif

        if (ev->wrapper.glue != NULL) {
                tevent_abort(ev,
                        "tevent_common_context_destructor() active on wrapper");
        }

#ifdef HAVE_PTHREAD
        ret = pthread_mutex_lock(&tevent_contexts_mutex);
        if (ret != 0) {
                abort();
        }

        DLIST_REMOVE(tevent_contexts, ev);

        ret = pthread_mutex_unlock(&tevent_contexts_mutex);
        if (ret != 0) {
                abort();
        }

        while (ev->threaded_contexts != NULL) {
                struct tevent_threaded_context *tctx = ev->threaded_contexts;

                ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
                if (ret != 0) {
                        abort();
                }

                /*
                 * Indicate to the thread that the tevent_context is
                 * gone. The counterpart of this is in
                 * _tevent_threaded_schedule_immediate, there we read
                 * this under the threaded_context's mutex.
                 */

                tctx->event_ctx = NULL;

                ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
                if (ret != 0) {
                        abort();
                }

                DLIST_REMOVE(ev->threaded_contexts, tctx);
        }

        ret = pthread_mutex_destroy(&ev->scheduled_mutex);
        if (ret != 0) {
                abort();
        }
#endif

        for (gl = ev->wrapper.list; gl; gl = gn) {
                gn = gl->next;

                gl->main_ev = NULL;
                DLIST_REMOVE(ev->wrapper.list, gl);
        }

        tevent_common_wakeup_fini(ev);

        for (fd = ev->fd_events; fd; fd = fn) {
                fn = fd->next;
                tevent_trace_fd_callback(fd->event_ctx, fd, TEVENT_EVENT_TRACE_DETACH);
                fd->wrapper = NULL;
                fd->event_ctx = NULL;
                DLIST_REMOVE(ev->fd_events, fd);
        }

        ev->last_zero_timer = NULL;
        for (te = ev->timer_events; te; te = tn) {
                tn = te->next;
                tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH);
                te->wrapper = NULL;
                te->event_ctx = NULL;
                DLIST_REMOVE(ev->timer_events, te);
        }

        for (ie = ev->immediate_events; ie; ie = in) {
                in = ie->next;
                tevent_trace_immediate_callback(ie->event_ctx, ie, TEVENT_EVENT_TRACE_DETACH);
                ie->wrapper = NULL;
                ie->event_ctx = NULL;
                ie->cancel_fn = NULL;
                DLIST_REMOVE(ev->immediate_events, ie);
        }

        for (se = ev->signal_events; se; se = sn) {
                sn = se->next;
                tevent_trace_signal_callback(se->event_ctx, se, TEVENT_EVENT_TRACE_DETACH);
                se->wrapper = NULL;
                se->event_ctx = NULL;
                DLIST_REMOVE(ev->signal_events, se);
                /*
                 * This is important. Otherwise signals are handled
                 * twice in the child, e.g. SIGHUP: one handler added
                 * in the parent, and another one in the child. -- BoYang
                 */
                tevent_cleanup_pending_signal_handlers(se);
        }

        /* Remove the nesting hook, or we get an abort when nesting is
         * not allowed. -- SSS
         * Note that we need to leave the allowed flag at its current
         * value, otherwise the use in tevent_re_initialise() will
         * leave the event context with allowed forced to false, which
         * will break users that expect nesting to be allowed.
         */
        ev->nesting.level = 0;
        ev->nesting.hook_fn = NULL;
        ev->nesting.hook_private = NULL;

        return 0;
}
static int tevent_common_context_constructor(struct tevent_context *ev)
{
        int ret;

        ret = tevent_init_globals();
        if (ret != 0) {
                return ret;
        }

#ifdef HAVE_PTHREAD

        ret = pthread_mutex_init(&ev->scheduled_mutex, NULL);
        if (ret != 0) {
                return ret;
        }

        ret = pthread_mutex_lock(&tevent_contexts_mutex);
        if (ret != 0) {
                pthread_mutex_destroy(&ev->scheduled_mutex);
                return ret;
        }

        DLIST_ADD(tevent_contexts, ev);

        ret = pthread_mutex_unlock(&tevent_contexts_mutex);
        if (ret != 0) {
                abort();
        }
#endif

        talloc_set_destructor(ev, tevent_common_context_destructor);

        return 0;
}

void tevent_common_check_double_free(TALLOC_CTX *ptr, const char *reason)
{
        void *parent_ptr = talloc_parent(ptr);
        size_t parent_blocks = talloc_total_blocks(parent_ptr);

        if (parent_ptr != NULL && parent_blocks == 0) {
                /*
                 * This is an implicit talloc free, as we still have a parent
                 * but it's already being destroyed. Note that
                 * talloc_total_blocks(ptr) also just returns 0 if a
                 * talloc_free(ptr) is still in progress of freeing all
                 * children.
                 */
                return;
        }

        tevent_abort(NULL, reason);
}
/*
  create an event_context structure for a specific implementation.
  This must be the first events call, and all subsequent calls pass
  this event_context as the first element. Event handlers also
  receive this as their first argument.

  This function allows third-party applications to hook in glue code
  to their own event loop, so that they can make async use of our client libs.

  NOTE: use tevent_context_init() inside of Samba!
*/
struct tevent_context *tevent_context_init_ops(TALLOC_CTX *mem_ctx,
                                               const struct tevent_ops *ops,
                                               void *additional_data)
{
        struct tevent_context *ev;
        int ret;

        ev = talloc_zero(mem_ctx, struct tevent_context);
        if (!ev) return NULL;

        ret = tevent_common_context_constructor(ev);
        if (ret != 0) {
                talloc_free(ev);
                return NULL;
        }

        ev->ops = ops;
        ev->additional_data = additional_data;

        ret = ev->ops->context_init(ev);
        if (ret != 0) {
                talloc_free(ev);
                return NULL;
        }

        return ev;
}
/*
  create an event_context structure. This must be the first events
  call, and all subsequent calls pass this event_context as the first
  element. Event handlers also receive this as their first argument.
*/
struct tevent_context *tevent_context_init_byname(TALLOC_CTX *mem_ctx,
                                                  const char *name)
{
        const struct tevent_ops *ops;

        ops = tevent_find_ops_byname(name);
        if (ops == NULL) {
                return NULL;
        }

        return tevent_context_init_ops(mem_ctx, ops, NULL);
}
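/*
  A sketch of how a caller might enumerate the compiled-in backends and
  ask for one by name. Hypothetical caller code, compile-guarded; the
  "poll" backend name is only an example of what a build may register,
  and <stdio.h> is assumed for printf().
*/
#if 0
static struct tevent_context *example_init_preferred(TALLOC_CTX *mem_ctx)
{
        const char **backends = tevent_backend_list(mem_ctx);
        struct tevent_context *ev;
        size_t i;

        /* show what this build offers, e.g. "standard", "poll", "epoll" */
        for (i = 0; backends != NULL && backends[i] != NULL; i++) {
                printf("available backend: %s\n", backends[i]);
        }
        talloc_free(backends);

        /* ask for a specific backend ... */
        ev = tevent_context_init_byname(mem_ctx, "poll");
        if (ev == NULL) {
                /* ... or fall back to the configured default */
                ev = tevent_context_init_byname(mem_ctx, NULL);
        }
        return ev;
}
#endif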
/*
  create an event_context structure. This must be the first events
  call, and all subsequent calls pass this event_context as the first
  element. Event handlers also receive this as their first argument.
*/
struct tevent_context *tevent_context_init(TALLOC_CTX *mem_ctx)
{
        return tevent_context_init_byname(mem_ctx, NULL);
}
/*
  add a fd based event
  return NULL on failure (memory allocation error)
*/
struct tevent_fd *_tevent_add_fd(struct tevent_context *ev,
                                 TALLOC_CTX *mem_ctx,
                                 int fd,
                                 uint16_t flags,
                                 tevent_fd_handler_t handler,
                                 void *private_data,
                                 const char *handler_name,
                                 const char *location)
{
        return ev->ops->add_fd(ev, mem_ctx, fd, flags, handler, private_data,
                               handler_name, location);
}

/*
  set a close function on the fd event
*/
void tevent_fd_set_close_fn(struct tevent_fd *fde,
                            tevent_fd_close_fn_t close_fn)
{
        if (!fde) return;
        if (!fde->event_ctx) return;
        fde->event_ctx->ops->set_fd_close_fn(fde, close_fn);
}

static void tevent_fd_auto_close_fn(struct tevent_context *ev,
                                    struct tevent_fd *fde,
                                    int fd,
                                    void *private_data)
{
        close(fd);
}

void tevent_fd_set_auto_close(struct tevent_fd *fde)
{
        tevent_fd_set_close_fn(fde, tevent_fd_auto_close_fn);
}
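/*
  A sketch of a caller registering a read handler via the tevent_add_fd()
  wrapper macro and letting tevent close the descriptor when the fde is
  freed. Hypothetical, compile-guarded code; the handler and helper names
  are invented for illustration.
*/
#if 0
static void example_sock_handler(struct tevent_context *ev,
                                 struct tevent_fd *fde,
                                 uint16_t flags,
                                 void *private_data)
{
        int *sock = (int *)private_data;
        char buf[256];

        if (flags & TEVENT_FD_READ) {
                /* drain whatever is readable; error handling omitted */
                read(*sock, buf, sizeof(buf));
        }
}

static struct tevent_fd *example_watch_socket(struct tevent_context *ev,
                                              int *sock)
{
        struct tevent_fd *fde;

        fde = tevent_add_fd(ev, ev, *sock, TEVENT_FD_READ,
                            example_sock_handler, sock);
        if (fde == NULL) {
                return NULL;
        }

        /* close(*sock) automatically when the fde is talloc_free()d */
        tevent_fd_set_auto_close(fde);

        return fde;
}
#endif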
/*
  return the fd event flags
*/
uint16_t tevent_fd_get_flags(struct tevent_fd *fde)
{
        if (!fde) return 0;
        if (!fde->event_ctx) return 0;
        return fde->event_ctx->ops->get_fd_flags(fde);
}

/*
  set the fd event flags
*/
void tevent_fd_set_flags(struct tevent_fd *fde, uint16_t flags)
{
        if (!fde) return;
        if (!fde->event_ctx) return;
        fde->event_ctx->ops->set_fd_flags(fde, flags);
}

bool tevent_signal_support(struct tevent_context *ev)
{
        if (ev->ops->add_signal) {
                return true;
        }
        return false;
}

static void (*tevent_abort_fn)(const char *reason);

void tevent_set_abort_fn(void (*abort_fn)(const char *reason))
{
        tevent_abort_fn = abort_fn;
}

void tevent_abort(struct tevent_context *ev, const char *reason)
{
        if (ev != NULL) {
                tevent_debug(ev, TEVENT_DEBUG_FATAL,
                             "abort: %s\n", reason);
        }

        if (!tevent_abort_fn) {
                abort();
        }

        tevent_abort_fn(reason);
}

/*
  add a timer event
  return NULL on failure
*/
struct tevent_timer *_tevent_add_timer(struct tevent_context *ev,
                                       TALLOC_CTX *mem_ctx,
                                       struct timeval next_event,
                                       tevent_timer_handler_t handler,
                                       void *private_data,
                                       const char *handler_name,
                                       const char *location)
{
        return ev->ops->add_timer(ev, mem_ctx, next_event, handler, private_data,
                                  handler_name, location);
}
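/*
  Timer events are single shot (see the module comment at the top of this
  file); a periodic tick is obtained by re-adding a timer from inside the
  handler. A compile-guarded sketch with invented names, assuming a one
  second interval:
*/
#if 0
static void example_tick_handler(struct tevent_context *ev,
                                 struct tevent_timer *te,
                                 struct timeval current_time,
                                 void *private_data)
{
        /* ... do the periodic work here ... */

        /* re-arm: each timer fires exactly once, so schedule the next tick */
        tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
                         example_tick_handler, private_data);
}
#endif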
/*
  allocate an immediate event
  return NULL on failure (memory allocation error)
*/
struct tevent_immediate *_tevent_create_immediate(TALLOC_CTX *mem_ctx,
                                                  const char *location)
{
        struct tevent_immediate *im;

        im = talloc(mem_ctx, struct tevent_immediate);
        if (im == NULL) return NULL;

        *im = (struct tevent_immediate) { .create_location = location };

        return im;
}

/*
  schedule an immediate event
*/
void _tevent_schedule_immediate(struct tevent_immediate *im,
                                struct tevent_context *ev,
                                tevent_immediate_handler_t handler,
                                void *private_data,
                                const char *handler_name,
                                const char *location)
{
        ev->ops->schedule_immediate(im, ev, handler, private_data,
                                    handler_name, location);
}
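/*
  A sketch of deferring work to the next loop iteration with an immediate
  event, using the public tevent_create_immediate() and
  tevent_schedule_immediate() wrapper macros. Hypothetical, compile-guarded
  code with invented names.
*/
#if 0
static void example_im_handler(struct tevent_context *ev,
                               struct tevent_immediate *im,
                               void *private_data)
{
        /* runs from the next tevent_loop_once() iteration */
}

static void example_defer_work(struct tevent_context *ev)
{
        struct tevent_immediate *im;

        im = tevent_create_immediate(ev);
        if (im == NULL) {
                return;
        }

        tevent_schedule_immediate(im, ev, example_im_handler, NULL);
}
#endif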
/*
  add a signal event

  sa_flags are flags to sigaction(2)

  return NULL on failure
*/
struct tevent_signal *_tevent_add_signal(struct tevent_context *ev,
                                         TALLOC_CTX *mem_ctx,
                                         int signum,
                                         int sa_flags,
                                         tevent_signal_handler_t handler,
                                         void *private_data,
                                         const char *handler_name,
                                         const char *location)
{
        return ev->ops->add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data,
                                   handler_name, location);
}
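/*
  A sketch of installing a SIGHUP handler through the tevent_add_signal()
  wrapper macro, checking tevent_signal_support() first. Hypothetical,
  compile-guarded code with invented names; assumes <signal.h> for SIGHUP.
*/
#if 0
static void example_sighup_handler(struct tevent_context *ev,
                                   struct tevent_signal *se,
                                   int signum,
                                   int count,
                                   void *siginfo,
                                   void *private_data)
{
        /* e.g. reload configuration; "count" batches coalesced signals */
}

static bool example_install_sighup(struct tevent_context *ev)
{
        struct tevent_signal *se;

        if (!tevent_signal_support(ev)) {
                return false;
        }

        se = tevent_add_signal(ev, ev, SIGHUP, 0,
                               example_sighup_handler, NULL);
        return (se != NULL);
}
#endif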
void tevent_loop_allow_nesting(struct tevent_context *ev)
{
        if (ev->wrapper.glue != NULL) {
                tevent_abort(ev, "tevent_loop_allow_nesting() on wrapper");
                return;
        }

        if (ev->wrapper.list != NULL) {
                tevent_abort(ev, "tevent_loop_allow_nesting() with wrapper");
                return;
        }

        ev->nesting.allowed = true;
}

void tevent_loop_set_nesting_hook(struct tevent_context *ev,
                                  tevent_nesting_hook hook,
                                  void *private_data)
{
        if (ev->nesting.hook_fn &&
            (ev->nesting.hook_fn != hook ||
             ev->nesting.hook_private != private_data)) {
                /* the way the nesting hook code is currently written
                   we cannot support two different nesting hooks at the
                   same time. */
                tevent_abort(ev, "tevent: Violation of nesting hook rules\n");
        }
        ev->nesting.hook_fn = hook;
        ev->nesting.hook_private = private_data;
}

static void tevent_abort_nesting(struct tevent_context *ev, const char *location)
{
        const char *reason;

        reason = talloc_asprintf(NULL, "tevent_loop_once() nesting at %s",
                                 location);
        if (!reason) {
                reason = "tevent_loop_once() nesting";
        }

        tevent_abort(ev, reason);
}

/*
  do a single event loop using the events defined in ev
*/
int _tevent_loop_once(struct tevent_context *ev, const char *location)
{
        int ret;
        void *nesting_stack_ptr = NULL;

        ev->nesting.level++;

        if (ev->nesting.level > 1) {
                if (!ev->nesting.allowed) {
                        tevent_abort_nesting(ev, location);
                        errno = ELOOP;
                        return -1;
                }
        }
        if (ev->nesting.level > 0) {
                if (ev->nesting.hook_fn) {
                        int ret2;
                        ret2 = ev->nesting.hook_fn(ev,
                                                   ev->nesting.hook_private,
                                                   ev->nesting.level,
                                                   true,
                                                   (void *)&nesting_stack_ptr,
                                                   location);
                        if (ret2 != 0) {
                                ret = ret2;
                                goto done;
                        }
                }
        }

        tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE);
        ret = ev->ops->loop_once(ev, location);
        tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE);

        /* New event (and request) will always start with call depth 0. */
        tevent_thread_call_depth_set(0);

        if (ev->nesting.level > 0) {
                if (ev->nesting.hook_fn) {
                        int ret2;
                        ret2 = ev->nesting.hook_fn(ev,
                                                   ev->nesting.hook_private,
                                                   ev->nesting.level,
                                                   false,
                                                   (void *)&nesting_stack_ptr,
                                                   location);
                        if (ret2 != 0) {
                                ret = ret2;
                                goto done;
                        }
                }
        }

done:
        ev->nesting.level--;
        return ret;
}
/*
  this is a performance optimization for the samba4 nested event loop problems
*/
int _tevent_loop_until(struct tevent_context *ev,
                       bool (*finished)(void *private_data),
                       void *private_data,
                       const char *location)
{
        int ret = 0;
        void *nesting_stack_ptr = NULL;

        ev->nesting.level++;

        if (ev->nesting.level > 1) {
                if (!ev->nesting.allowed) {
                        tevent_abort_nesting(ev, location);
                        errno = ELOOP;
                        return -1;
                }
        }
        if (ev->nesting.level > 0) {
                if (ev->nesting.hook_fn) {
                        int ret2;
                        ret2 = ev->nesting.hook_fn(ev,
                                                   ev->nesting.hook_private,
                                                   ev->nesting.level,
                                                   true,
                                                   (void *)&nesting_stack_ptr,
                                                   location);
                        if (ret2 != 0) {
                                ret = ret2;
                                goto done;
                        }
                }
        }

        while (!finished(private_data)) {
                tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE);
                ret = ev->ops->loop_once(ev, location);
                tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE);
                if (ret != 0) {
                        break;
                }
        }

        if (ev->nesting.level > 0) {
                if (ev->nesting.hook_fn) {
                        int ret2;
                        ret2 = ev->nesting.hook_fn(ev,
                                                   ev->nesting.hook_private,
                                                   ev->nesting.level,
                                                   false,
                                                   (void *)&nesting_stack_ptr,
                                                   location);
                        if (ret2 != 0) {
                                ret = ret2;
                                goto done;
                        }
                }
        }

done:
        ev->nesting.level--;
        return ret;
}

bool tevent_common_have_events(struct tevent_context *ev)
{
        if (ev->fd_events != NULL) {
                if (ev->fd_events != ev->wakeup_fde) {
                        return true;
                }
                if (ev->fd_events->next != NULL) {
                        return true;
                }

                /*
                 * At this point we just have the wakeup pipe event as
                 * the only fd_event. That one does not count as a
                 * regular event, so look at the other event types.
                 */
        }

        return ((ev->timer_events != NULL) ||
                (ev->immediate_events != NULL) ||
                (ev->signal_events != NULL));
}
/*
  return non-zero on failure, or 0 once all events have been removed
*/
int tevent_common_loop_wait(struct tevent_context *ev,
                            const char *location)
{
        /*
         * loop as long as we have events pending
         */
        while (tevent_common_have_events(ev)) {
                int ret;
                ret = _tevent_loop_once(ev, location);
                if (ret != 0) {
                        tevent_debug(ev, TEVENT_DEBUG_FATAL,
                                     "_tevent_loop_once() failed: %d - %s\n",
                                     ret, strerror(errno));
                        return ret;
                }
        }

        tevent_debug(ev, TEVENT_DEBUG_WARNING,
                     "tevent_common_loop_wait() out of events\n");
        return 0;
}
/*
  return non-zero on failure, or 0 once all events have been removed
*/
int _tevent_loop_wait(struct tevent_context *ev, const char *location)
{
        return ev->ops->loop_wait(ev, location);
}

/*
  re-initialise a tevent context. This leaves you with the same
  event context, but all events are wiped and the structure is
  re-initialised. This is most useful after a fork()

  zero is returned on success, non-zero on failure
*/
int tevent_re_initialise(struct tevent_context *ev)
{
        tevent_common_context_destructor(ev);

        tevent_common_context_constructor(ev);

        return ev->ops->context_init(ev);
}
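/*
  A sketch of the "after a fork()" use mentioned above: the child wipes the
  events it inherited and re-initialises the backend state for the new
  process before adding its own events. Hypothetical, compile-guarded code;
  assumes <unistd.h> for fork() and _exit().
*/
#if 0
static void example_after_fork(struct tevent_context *ev)
{
        pid_t pid = fork();

        if (pid == 0) {
                /* child: drop all events inherited from the parent and
                 * reset the backend state for this new process */
                if (tevent_re_initialise(ev) != 0) {
                        _exit(1);
                }
                /* the child can now add its own events and run its own loop */
        }
}
#endif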
static void wakeup_pipe_handler(struct tevent_context *ev,
                                struct tevent_fd *fde,
                                uint16_t flags, void *_private)
{
        ssize_t ret;

        do {
                /*
                 * This is the boilerplate for eventfd, but it works
                 * for pipes too. And as we don't care about the data
                 * we read, we're fine.
                 */
                uint64_t val;
                ret = read(fde->fd, &val, sizeof(val));
        } while (ret == -1 && errno == EINTR);
}

/*
 * Initialize the wakeup pipe and pipe fde
 */

int tevent_common_wakeup_init(struct tevent_context *ev)
{
        int ret, read_fd;

        if (ev->wakeup_fde != NULL) {
                return 0;
        }

#ifdef HAVE_EVENTFD
        ret = eventfd(0, EFD_NONBLOCK);
        if (ret == -1) {
                return errno;
        }
        read_fd = ev->wakeup_fd = ret;
#else
        {
                int pipe_fds[2];
                ret = pipe(pipe_fds);
                if (ret == -1) {
                        return errno;
                }
                ev->wakeup_fd = pipe_fds[1];
                ev->wakeup_read_fd = pipe_fds[0];

                ev_set_blocking(ev->wakeup_fd, false);
                ev_set_blocking(ev->wakeup_read_fd, false);

                read_fd = ev->wakeup_read_fd;
        }
#endif

        ev->wakeup_fde = tevent_add_fd(ev, ev, read_fd, TEVENT_FD_READ,
                                       wakeup_pipe_handler, NULL);
        if (ev->wakeup_fde == NULL) {
                close(ev->wakeup_fd);
#ifndef HAVE_EVENTFD
                close(ev->wakeup_read_fd);
#endif
                return ENOMEM;
        }

        return 0;
}

int tevent_common_wakeup_fd(int fd)
{
        ssize_t ret;

        do {
#ifdef HAVE_EVENTFD
                uint64_t val = 1;
                ret = write(fd, &val, sizeof(val));
#else
                char c = '\0';
                ret = write(fd, &c, 1);
#endif
        } while ((ret == -1) && (errno == EINTR));

        return 0;
}

int tevent_common_wakeup(struct tevent_context *ev)
{
        if (ev->wakeup_fde == NULL) {
                return ENOTCONN;
        }

        return tevent_common_wakeup_fd(ev->wakeup_fd);
}

static void tevent_common_wakeup_fini(struct tevent_context *ev)
{
        if (ev->wakeup_fde == NULL) {
                return;
        }

        TALLOC_FREE(ev->wakeup_fde);

        close(ev->wakeup_fd);
#ifndef HAVE_EVENTFD
        close(ev->wakeup_read_fd);
#endif
}