[Samba.git] / lib / tevent / tevent.c
1 /*
2 Unix SMB/CIFS implementation.
3 main select loop and event handling
4 Copyright (C) Andrew Tridgell 2003
5 Copyright (C) Stefan Metzmacher 2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
9 ** under the LGPL
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 PLEASE READ THIS BEFORE MODIFYING!
28 This module is a general abstraction for the main select loop and
29  event handling. Do not ever put any localised hacks in here; instead,
30 register one of the possible event types and implement that event
31 somewhere else.
33  There are two types of events handled in this module:
35 1) a file descriptor becoming readable or writeable. This is mostly
36 used for network sockets, but can be used for any type of file
37 descriptor. You may only register one handler for each file
38 descriptor/io combination or you will get unpredictable results
39 (this means that you can have a handler for read events, and a
40 separate handler for write events, but not two handlers that are
41 both handling read events)
43 2) a timed event. You can register an event that happens at a
44 specific time. You can register as many of these as you
45 like. They are single shot - add a new timed event in the event
46 handler to get another event.
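    For example, a handler that should fire once per second is expected to
    re-arm itself. A sketch only; "tick_handler" and "do_periodic_work" are
    hypothetical caller-side names, not part of this file:

      static void tick_handler(struct tevent_context *ev, struct tevent_timer *te,
                               struct timeval now, void *private_data)
      {
              do_periodic_work(private_data);
              tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
                               tick_handler, private_data);
      }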
48  To set up a set of events you first need to create an event_context
49  structure using the function tevent_context_init(). This returns a
50  'struct tevent_context' that you use in all subsequent calls.
52 After that you can add/remove events that you are interested in
53  using tevent_add_*() and talloc_free().
55  Finally, you call tevent_loop_once() to block waiting for one of the
56  events to occur, or tevent_loop_wait() which will loop
57  forever.
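    A minimal setup sketch (illustrative only; error checking is omitted and
    "sock_fd" and "echo_handler" are hypothetical names from the caller):

      struct tevent_context *ev = tevent_context_init(mem_ctx);
      struct tevent_fd *fde = tevent_add_fd(ev, ev, sock_fd, TEVENT_FD_READ,
                                            echo_handler, NULL);
      tevent_loop_wait(ev);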
60 #include "replace.h"
61 #include "system/filesys.h"
62 #ifdef HAVE_PTHREAD
63 #include "system/threads.h"
64 #endif
65 #define TEVENT_DEPRECATED 1
66 #include "tevent.h"
67 #include "tevent_internal.h"
68 #include "tevent_util.h"
69 #ifdef HAVE_EVENTFD
70 #include <sys/eventfd.h>
71 #endif
73 struct tevent_ops_list {
74 struct tevent_ops_list *next, *prev;
75 const char *name;
76 const struct tevent_ops *ops;
79 /* list of registered event backends */
80 static struct tevent_ops_list *tevent_backends = NULL;
81 static char *tevent_default_backend = NULL;
84 register an events backend
86 bool tevent_register_backend(const char *name, const struct tevent_ops *ops)
88 struct tevent_ops_list *e;
90 for (e = tevent_backends; e != NULL; e = e->next) {
91 if (0 == strcmp(e->name, name)) {
92 /* already registered, skip it */
93 return true;
97 e = talloc(NULL, struct tevent_ops_list);
98 if (e == NULL) return false;
100 e->name = name;
101 e->ops = ops;
102 DLIST_ADD(tevent_backends, e);
104 return true;
108 set the default event backend
110 void tevent_set_default_backend(const char *backend)
112 talloc_free(tevent_default_backend);
113 tevent_default_backend = talloc_strdup(NULL, backend);
117 initialise backends if not already done
119 static void tevent_backend_init(void)
121 static bool done;
123 if (done) {
124 return;
127 done = true;
129 tevent_poll_init();
130 tevent_poll_mt_init();
131 #if defined(HAVE_EPOLL)
132 tevent_epoll_init();
133 #elif defined(HAVE_SOLARIS_PORTS)
134 tevent_port_init();
135 #endif
137 tevent_standard_init();
140 _PRIVATE_ const struct tevent_ops *tevent_find_ops_byname(const char *name)
142 struct tevent_ops_list *e;
144 tevent_backend_init();
146 if (name == NULL) {
147 name = tevent_default_backend;
149 if (name == NULL) {
150 name = "standard";
153 for (e = tevent_backends; e != NULL; e = e->next) {
154 if (0 == strcmp(e->name, name)) {
155 return e->ops;
159 return NULL;
163 list available backends
165 const char **tevent_backend_list(TALLOC_CTX *mem_ctx)
167 const char **list = NULL;
168 struct tevent_ops_list *e;
169 size_t idx = 0;
171 tevent_backend_init();
173 for (e=tevent_backends;e;e=e->next) {
174 idx += 1;
177 list = talloc_zero_array(mem_ctx, const char *, idx+1);
178 if (list == NULL) {
179 return NULL;
182 idx = 0;
183 for (e=tevent_backends;e;e=e->next) {
184 list[idx] = talloc_strdup(list, e->name);
185 if (list[idx] == NULL) {
186 TALLOC_FREE(list);
187 return NULL;
189 idx += 1;
192 return list;
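/*
  A caller can inspect the registered backends and pick one explicitly before
  creating a context. Sketch only, assuming a talloc context "mem_ctx" exists
  in the caller:

    size_t i;
    const char **names = tevent_backend_list(mem_ctx);
    for (i = 0; names != NULL && names[i] != NULL; i++) {
            printf("available backend: %s\n", names[i]);
    }
    tevent_set_default_backend("poll");
*/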
195 static void tevent_common_wakeup_fini(struct tevent_context *ev);
197 #ifdef HAVE_PTHREAD
199 static pthread_mutex_t tevent_contexts_mutex = PTHREAD_MUTEX_INITIALIZER;
200 static struct tevent_context *tevent_contexts = NULL;
201 static pthread_once_t tevent_atfork_initialized = PTHREAD_ONCE_INIT;
203 static void tevent_atfork_prepare(void)
205 struct tevent_context *ev;
206 int ret;
208 ret = pthread_mutex_lock(&tevent_contexts_mutex);
209 if (ret != 0) {
210 abort();
213 for (ev = tevent_contexts; ev != NULL; ev = ev->next) {
214 struct tevent_threaded_context *tctx;
216 for (tctx = ev->threaded_contexts; tctx != NULL;
217 tctx = tctx->next) {
218 ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
219 if (ret != 0) {
220 tevent_abort(ev, "pthread_mutex_lock failed");
224 ret = pthread_mutex_lock(&ev->scheduled_mutex);
225 if (ret != 0) {
226 tevent_abort(ev, "pthread_mutex_lock failed");
231 static void tevent_atfork_parent(void)
233 struct tevent_context *ev;
234 int ret;
236 for (ev = DLIST_TAIL(tevent_contexts); ev != NULL;
237 ev = DLIST_PREV(ev)) {
238 struct tevent_threaded_context *tctx;
240 ret = pthread_mutex_unlock(&ev->scheduled_mutex);
241 if (ret != 0) {
242 tevent_abort(ev, "pthread_mutex_unlock failed");
245 for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
246 tctx = DLIST_PREV(tctx)) {
247 ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
248 if (ret != 0) {
249 tevent_abort(
250 ev, "pthread_mutex_unlock failed");
255 ret = pthread_mutex_unlock(&tevent_contexts_mutex);
256 if (ret != 0) {
257 abort();
261 static void tevent_atfork_child(void)
263 struct tevent_context *ev;
264 int ret;
266 for (ev = DLIST_TAIL(tevent_contexts); ev != NULL;
267 ev = DLIST_PREV(ev)) {
268 struct tevent_threaded_context *tctx;
270 for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
271 tctx = DLIST_PREV(tctx)) {
272 tctx->event_ctx = NULL;
274 ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
275 if (ret != 0) {
276 tevent_abort(
277 ev, "pthread_mutex_unlock failed");
281 ev->threaded_contexts = NULL;
283 ret = pthread_mutex_unlock(&ev->scheduled_mutex);
284 if (ret != 0) {
285 tevent_abort(ev, "pthread_mutex_unlock failed");
289 ret = pthread_mutex_unlock(&tevent_contexts_mutex);
290 if (ret != 0) {
291 abort();
295 static void tevent_prep_atfork(void)
297 int ret;
299 ret = pthread_atfork(tevent_atfork_prepare,
300 tevent_atfork_parent,
301 tevent_atfork_child);
302 if (ret != 0) {
303 abort();
307 #endif
309 int tevent_common_context_destructor(struct tevent_context *ev)
311 struct tevent_fd *fd, *fn;
312 struct tevent_timer *te, *tn;
313 struct tevent_immediate *ie, *in;
314 struct tevent_signal *se, *sn;
315 struct tevent_wrapper_glue *gl, *gn;
316 #ifdef HAVE_PTHREAD
317 int ret;
318 #endif
320 if (ev->wrapper.glue != NULL) {
321 tevent_abort(ev,
322 "tevent_common_context_destructor() active on wrapper");
325 #ifdef HAVE_PTHREAD
326 ret = pthread_mutex_lock(&tevent_contexts_mutex);
327 if (ret != 0) {
328 abort();
331 DLIST_REMOVE(tevent_contexts, ev);
333 ret = pthread_mutex_unlock(&tevent_contexts_mutex);
334 if (ret != 0) {
335 abort();
338 while (ev->threaded_contexts != NULL) {
339 struct tevent_threaded_context *tctx = ev->threaded_contexts;
341 ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
342 if (ret != 0) {
343 abort();
347 * Indicate to the thread that the tevent_context is
348 * gone. The counterpart of this is in
349 * _tevent_threaded_schedule_immediate, there we read
350 * this under the threaded_context's mutex.
353 tctx->event_ctx = NULL;
355 ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
356 if (ret != 0) {
357 abort();
360 DLIST_REMOVE(ev->threaded_contexts, tctx);
363 ret = pthread_mutex_destroy(&ev->scheduled_mutex);
364 if (ret != 0) {
365 abort();
367 #endif
369 for (gl = ev->wrapper.list; gl; gl = gn) {
370 gn = gl->next;
372 gl->main_ev = NULL;
373 DLIST_REMOVE(ev->wrapper.list, gl);
376 tevent_common_wakeup_fini(ev);
378 for (fd = ev->fd_events; fd; fd = fn) {
379 fn = fd->next;
380 tevent_trace_fd_callback(fd->event_ctx, fd, TEVENT_EVENT_TRACE_DETACH);
381 fd->wrapper = NULL;
382 fd->event_ctx = NULL;
383 DLIST_REMOVE(ev->fd_events, fd);
386 ev->last_zero_timer = NULL;
387 for (te = ev->timer_events; te; te = tn) {
388 tn = te->next;
389 tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH);
390 te->wrapper = NULL;
391 te->event_ctx = NULL;
392 DLIST_REMOVE(ev->timer_events, te);
395 for (ie = ev->immediate_events; ie; ie = in) {
396 in = ie->next;
397 tevent_trace_immediate_callback(ie->event_ctx, ie, TEVENT_EVENT_TRACE_DETACH);
398 ie->wrapper = NULL;
399 ie->event_ctx = NULL;
400 ie->cancel_fn = NULL;
401 DLIST_REMOVE(ev->immediate_events, ie);
404 for (se = ev->signal_events; se; se = sn) {
405 sn = se->next;
406 tevent_trace_signal_callback(se->event_ctx, se, TEVENT_EVENT_TRACE_DETACH);
407 se->wrapper = NULL;
408 se->event_ctx = NULL;
409 DLIST_REMOVE(ev->signal_events, se);
411                  * This is important, otherwise signals
412                  * are handled twice in the child, e.g. SIGHUP:
413                  * one added in the parent, and another one in
414                  * the child. -- BoYang
416 tevent_cleanup_pending_signal_handlers(se);
419         /* Remove the nesting hook or we get an abort when nesting is
420          * not allowed. -- SSS
421 * Note that we need to leave the allowed flag at its current
422 * value, otherwise the use in tevent_re_initialise() will
423 * leave the event context with allowed forced to false, which
424 * will break users that expect nesting to be allowed
426 ev->nesting.level = 0;
427 ev->nesting.hook_fn = NULL;
428 ev->nesting.hook_private = NULL;
430 return 0;
433 static int tevent_common_context_constructor(struct tevent_context *ev)
435 int ret;
437 #ifdef HAVE_PTHREAD
439 ret = pthread_once(&tevent_atfork_initialized, tevent_prep_atfork);
440 if (ret != 0) {
441 return ret;
444 ret = pthread_mutex_init(&ev->scheduled_mutex, NULL);
445 if (ret != 0) {
446 return ret;
449 ret = pthread_mutex_lock(&tevent_contexts_mutex);
450 if (ret != 0) {
451 pthread_mutex_destroy(&ev->scheduled_mutex);
452 return ret;
455 DLIST_ADD(tevent_contexts, ev);
457 ret = pthread_mutex_unlock(&tevent_contexts_mutex);
458 if (ret != 0) {
459 abort();
461 #endif
463 talloc_set_destructor(ev, tevent_common_context_destructor);
465 return 0;
468 void tevent_common_check_double_free(TALLOC_CTX *ptr, const char *reason)
470 void *parent_ptr = talloc_parent(ptr);
471 size_t parent_blocks = talloc_total_blocks(parent_ptr);
473 if (parent_ptr != NULL && parent_blocks == 0) {
475 * This is an implicit talloc free, as we still have a parent
476 * but it's already being destroyed. Note that
477 * talloc_total_blocks(ptr) also just returns 0 if a
478 * talloc_free(ptr) is still in progress of freeing all
479 * children.
481 return;
484 tevent_abort(NULL, reason);
488   create an event_context structure for a specific implementation.
489 This must be the first events call, and all subsequent calls pass
490 this event_context as the first element. Event handlers also
491 receive this as their first argument.
493   This function allows third-party applications to hook in glue code
494   to their own event loop code, so that they can make asynchronous use of our client libs.
496 NOTE: use tevent_context_init() inside of samba!
498 struct tevent_context *tevent_context_init_ops(TALLOC_CTX *mem_ctx,
499 const struct tevent_ops *ops,
500 void *additional_data)
502 struct tevent_context *ev;
503 int ret;
505 ev = talloc_zero(mem_ctx, struct tevent_context);
506 if (!ev) return NULL;
508 ret = tevent_common_context_constructor(ev);
509 if (ret != 0) {
510 talloc_free(ev);
511 return NULL;
514 ev->ops = ops;
515 ev->additional_data = additional_data;
517 ret = ev->ops->context_init(ev);
518 if (ret != 0) {
519 talloc_free(ev);
520 return NULL;
523 return ev;
527   create an event_context structure. This must be the first events
528 call, and all subsequent calls pass this event_context as the first
529 element. Event handlers also receive this as their first argument.
531 struct tevent_context *tevent_context_init_byname(TALLOC_CTX *mem_ctx,
532 const char *name)
534 const struct tevent_ops *ops;
536 ops = tevent_find_ops_byname(name);
537 if (ops == NULL) {
538 return NULL;
541 return tevent_context_init_ops(mem_ctx, ops, NULL);
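/*
  Example (sketch): ask for a specific backend by name and fall back to the
  default when it is not compiled in:

    struct tevent_context *ev = tevent_context_init_byname(mem_ctx, "epoll");
    if (ev == NULL) {
            ev = tevent_context_init(mem_ctx);
    }
*/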
546   create an event_context structure. This must be the first events
547 call, and all subsequent calls pass this event_context as the first
548 element. Event handlers also receive this as their first argument.
550 struct tevent_context *tevent_context_init(TALLOC_CTX *mem_ctx)
552 return tevent_context_init_byname(mem_ctx, NULL);
556   add an fd based event
557 return NULL on failure (memory allocation error)
559 struct tevent_fd *_tevent_add_fd(struct tevent_context *ev,
560 TALLOC_CTX *mem_ctx,
561 int fd,
562 uint16_t flags,
563 tevent_fd_handler_t handler,
564 void *private_data,
565 const char *handler_name,
566 const char *location)
568 return ev->ops->add_fd(ev, mem_ctx, fd, flags, handler, private_data,
569 handler_name, location);
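/*
  Typical callers use the tevent_add_fd() macro. Sketch only; "sock" and
  "my_read_handler" are hypothetical names from the caller:

    struct tevent_fd *fde = tevent_add_fd(ev, ev, sock, TEVENT_FD_READ,
                                          my_read_handler, NULL);
    if (fde == NULL) {
            return ENOMEM;
    }
    tevent_fd_set_auto_close(fde);
*/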
573 set a close function on the fd event
575 void tevent_fd_set_close_fn(struct tevent_fd *fde,
576 tevent_fd_close_fn_t close_fn)
578 if (!fde) return;
579 if (!fde->event_ctx) return;
580 fde->event_ctx->ops->set_fd_close_fn(fde, close_fn);
583 static void tevent_fd_auto_close_fn(struct tevent_context *ev,
584 struct tevent_fd *fde,
585 int fd,
586 void *private_data)
588 close(fd);
591 void tevent_fd_set_auto_close(struct tevent_fd *fde)
593 tevent_fd_set_close_fn(fde, tevent_fd_auto_close_fn);
597 return the fd event flags
599 uint16_t tevent_fd_get_flags(struct tevent_fd *fde)
601 if (!fde) return 0;
602 if (!fde->event_ctx) return 0;
603 return fde->event_ctx->ops->get_fd_flags(fde);
607 set the fd event flags
609 void tevent_fd_set_flags(struct tevent_fd *fde, uint16_t flags)
611 if (!fde) return;
612 if (!fde->event_ctx) return;
613 fde->event_ctx->ops->set_fd_flags(fde, flags);
616 bool tevent_signal_support(struct tevent_context *ev)
618 if (ev->ops->add_signal) {
619 return true;
621 return false;
624 static void (*tevent_abort_fn)(const char *reason);
626 void tevent_set_abort_fn(void (*abort_fn)(const char *reason))
628 tevent_abort_fn = abort_fn;
631 void tevent_abort(struct tevent_context *ev, const char *reason)
633 if (ev != NULL) {
634 tevent_debug(ev, TEVENT_DEBUG_FATAL,
635 "abort: %s\n", reason);
638 if (!tevent_abort_fn) {
639 abort();
642 tevent_abort_fn(reason);
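/*
  Sketch: an application can route fatal tevent errors through its own
  logging before terminating ("my_fatal_handler" is a hypothetical function):

    static void my_fatal_handler(const char *reason)
    {
            fprintf(stderr, "tevent fatal: %s\n", reason);
            abort();
    }

    tevent_set_abort_fn(my_fatal_handler);
*/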
646 add a timer event
647 return NULL on failure
649 struct tevent_timer *_tevent_add_timer(struct tevent_context *ev,
650 TALLOC_CTX *mem_ctx,
651 struct timeval next_event,
652 tevent_timer_handler_t handler,
653 void *private_data,
654 const char *handler_name,
655 const char *location)
657 return ev->ops->add_timer(ev, mem_ctx, next_event, handler, private_data,
658 handler_name, location);
662 allocate an immediate event
663 return NULL on failure (memory allocation error)
665 struct tevent_immediate *_tevent_create_immediate(TALLOC_CTX *mem_ctx,
666 const char *location)
668 struct tevent_immediate *im;
670 im = talloc(mem_ctx, struct tevent_immediate);
671 if (im == NULL) return NULL;
673 *im = (struct tevent_immediate) { .create_location = location };
675 return im;
679 schedule an immediate event
681 void _tevent_schedule_immediate(struct tevent_immediate *im,
682 struct tevent_context *ev,
683 tevent_immediate_handler_t handler,
684 void *private_data,
685 const char *handler_name,
686 const char *location)
688 ev->ops->schedule_immediate(im, ev, handler, private_data,
689 handler_name, location);
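/*
  Sketch of the public immediate API ("run_soon" is a hypothetical handler):

    struct tevent_immediate *im = tevent_create_immediate(mem_ctx);
    if (im != NULL) {
            tevent_schedule_immediate(im, ev, run_soon, NULL);
    }
*/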
693 add a signal event
695 sa_flags are flags to sigaction(2)
697 return NULL on failure
699 struct tevent_signal *_tevent_add_signal(struct tevent_context *ev,
700 TALLOC_CTX *mem_ctx,
701 int signum,
702 int sa_flags,
703 tevent_signal_handler_t handler,
704 void *private_data,
705 const char *handler_name,
706 const char *location)
708 return ev->ops->add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data,
709 handler_name, location);
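/*
  Sketch ("sighup_handler" is a hypothetical callback). Callers should check
  tevent_signal_support() first, as not every backend implements signals:

    if (tevent_signal_support(ev)) {
            se = tevent_add_signal(ev, ev, SIGHUP, 0, sighup_handler, NULL);
    }
*/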
712 void tevent_loop_allow_nesting(struct tevent_context *ev)
714 if (ev->wrapper.glue != NULL) {
715 tevent_abort(ev, "tevent_loop_allow_nesting() on wrapper");
716 return;
719 if (ev->wrapper.list != NULL) {
720 tevent_abort(ev, "tevent_loop_allow_nesting() with wrapper");
721 return;
724 ev->nesting.allowed = true;
727 void tevent_loop_set_nesting_hook(struct tevent_context *ev,
728 tevent_nesting_hook hook,
729 void *private_data)
731 if (ev->nesting.hook_fn &&
732 (ev->nesting.hook_fn != hook ||
733 ev->nesting.hook_private != private_data)) {
734 /* the way the nesting hook code is currently written
735 we cannot support two different nesting hooks at the
736 same time. */
737 tevent_abort(ev, "tevent: Violation of nesting hook rules\n");
739 ev->nesting.hook_fn = hook;
740 ev->nesting.hook_private = private_data;
743 static void tevent_abort_nesting(struct tevent_context *ev, const char *location)
745 const char *reason;
747 reason = talloc_asprintf(NULL, "tevent_loop_once() nesting at %s",
748 location);
749 if (!reason) {
750 reason = "tevent_loop_once() nesting";
753 tevent_abort(ev, reason);
757 do a single event loop using the events defined in ev
759 int _tevent_loop_once(struct tevent_context *ev, const char *location)
761 int ret;
762 void *nesting_stack_ptr = NULL;
764 ev->nesting.level++;
766 if (ev->nesting.level > 1) {
767 if (!ev->nesting.allowed) {
768 tevent_abort_nesting(ev, location);
769 errno = ELOOP;
770 return -1;
773 if (ev->nesting.level > 0) {
774 if (ev->nesting.hook_fn) {
775 int ret2;
776 ret2 = ev->nesting.hook_fn(ev,
777 ev->nesting.hook_private,
778 ev->nesting.level,
779 true,
780 (void *)&nesting_stack_ptr,
781 location);
782 if (ret2 != 0) {
783 ret = ret2;
784 goto done;
789 tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE);
790 ret = ev->ops->loop_once(ev, location);
791 tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE);
793 if (ev->nesting.level > 0) {
794 if (ev->nesting.hook_fn) {
795 int ret2;
796 ret2 = ev->nesting.hook_fn(ev,
797 ev->nesting.hook_private,
798 ev->nesting.level,
799 false,
800 (void *)&nesting_stack_ptr,
801 location);
802 if (ret2 != 0) {
803 ret = ret2;
804 goto done;
809 done:
810 ev->nesting.level--;
811 return ret;
815 this is a performance optimization for the samba4 nested event loop problems
817 int _tevent_loop_until(struct tevent_context *ev,
818 bool (*finished)(void *private_data),
819 void *private_data,
820 const char *location)
822 int ret = 0;
823 void *nesting_stack_ptr = NULL;
825 ev->nesting.level++;
827 if (ev->nesting.level > 1) {
828 if (!ev->nesting.allowed) {
829 tevent_abort_nesting(ev, location);
830 errno = ELOOP;
831 return -1;
834 if (ev->nesting.level > 0) {
835 if (ev->nesting.hook_fn) {
836 int ret2;
837 ret2 = ev->nesting.hook_fn(ev,
838 ev->nesting.hook_private,
839 ev->nesting.level,
840 true,
841 (void *)&nesting_stack_ptr,
842 location);
843 if (ret2 != 0) {
844 ret = ret2;
845 goto done;
850 while (!finished(private_data)) {
851 tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE);
852 ret = ev->ops->loop_once(ev, location);
853 tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE);
854 if (ret != 0) {
855 break;
859 if (ev->nesting.level > 0) {
860 if (ev->nesting.hook_fn) {
861 int ret2;
862 ret2 = ev->nesting.hook_fn(ev,
863 ev->nesting.hook_private,
864 ev->nesting.level,
865 false,
866 (void *)&nesting_stack_ptr,
867 location);
868 if (ret2 != 0) {
869 ret = ret2;
870 goto done;
875 done:
876 ev->nesting.level--;
877 return ret;
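/*
  Sketch of the tevent_loop_until() macro; "request_done" and "struct my_state"
  are hypothetical caller-side names:

    static bool request_done(void *private_data)
    {
            struct my_state *state = private_data;
            return state->finished;
    }

    ret = tevent_loop_until(ev, request_done, state);
*/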
880 bool tevent_common_have_events(struct tevent_context *ev)
882 if (ev->fd_events != NULL) {
883 if (ev->fd_events != ev->wakeup_fde) {
884 return true;
886 if (ev->fd_events->next != NULL) {
887 return true;
891 * At this point we just have the wakeup pipe event as
892 * the only fd_event. That one does not count as a
893 * regular event, so look at the other event types.
897 return ((ev->timer_events != NULL) ||
898 (ev->immediate_events != NULL) ||
899 (ev->signal_events != NULL));
903   return non-zero on failure, or 0 once all events have been removed
905 int tevent_common_loop_wait(struct tevent_context *ev,
906 const char *location)
909 * loop as long as we have events pending
911 while (tevent_common_have_events(ev)) {
912 int ret;
913 ret = _tevent_loop_once(ev, location);
914 if (ret != 0) {
915 tevent_debug(ev, TEVENT_DEBUG_FATAL,
916 "_tevent_loop_once() failed: %d - %s\n",
917 ret, strerror(errno));
918 return ret;
922 tevent_debug(ev, TEVENT_DEBUG_WARNING,
923 "tevent_common_loop_wait() out of events\n");
924 return 0;
928   return non-zero on failure, or 0 once all events have been removed
930 int _tevent_loop_wait(struct tevent_context *ev, const char *location)
932 return ev->ops->loop_wait(ev, location);
937 re-initialise a tevent context. This leaves you with the same
938 event context, but all events are wiped and the structure is
939 re-initialised. This is most useful after a fork()
941 zero is returned on success, non-zero on failure
943 int tevent_re_initialise(struct tevent_context *ev)
945 tevent_common_context_destructor(ev);
947 tevent_common_context_constructor(ev);
949 return ev->ops->context_init(ev);
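/*
  Sketch: after fork() a child keeps the same context pointer but drops all
  inherited events before adding its own:

    pid_t pid = fork();
    if (pid == 0) {
            if (tevent_re_initialise(ev) != 0) {
                    exit(1);
            }
    }
*/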
952 static void wakeup_pipe_handler(struct tevent_context *ev,
953 struct tevent_fd *fde,
954 uint16_t flags, void *_private)
956 ssize_t ret;
958 do {
960 * This is the boilerplate for eventfd, but it works
961 * for pipes too. And as we don't care about the data
962 * we read, we're fine.
964 uint64_t val;
965 ret = read(fde->fd, &val, sizeof(val));
966 } while (ret == -1 && errno == EINTR);
970 * Initialize the wakeup pipe and pipe fde
973 int tevent_common_wakeup_init(struct tevent_context *ev)
975 int ret, read_fd;
977 if (ev->wakeup_fde != NULL) {
978 return 0;
981 #ifdef HAVE_EVENTFD
982 ret = eventfd(0, EFD_NONBLOCK);
983 if (ret == -1) {
984 return errno;
986 read_fd = ev->wakeup_fd = ret;
987 #else
989 int pipe_fds[2];
990 ret = pipe(pipe_fds);
991 if (ret == -1) {
992 return errno;
994 ev->wakeup_fd = pipe_fds[1];
995 ev->wakeup_read_fd = pipe_fds[0];
997 ev_set_blocking(ev->wakeup_fd, false);
998 ev_set_blocking(ev->wakeup_read_fd, false);
1000 read_fd = ev->wakeup_read_fd;
1002 #endif
1004 ev->wakeup_fde = tevent_add_fd(ev, ev, read_fd, TEVENT_FD_READ,
1005 wakeup_pipe_handler, NULL);
1006 if (ev->wakeup_fde == NULL) {
1007 close(ev->wakeup_fd);
1008 #ifndef HAVE_EVENTFD
1009 close(ev->wakeup_read_fd);
1010 #endif
1011 return ENOMEM;
1014 return 0;
1017 int tevent_common_wakeup_fd(int fd)
1019 ssize_t ret;
1021 do {
1022 #ifdef HAVE_EVENTFD
1023 uint64_t val = 1;
1024 ret = write(fd, &val, sizeof(val));
1025 #else
1026 char c = '\0';
1027 ret = write(fd, &c, 1);
1028 #endif
1029 } while ((ret == -1) && (errno == EINTR));
1031 return 0;
1034 int tevent_common_wakeup(struct tevent_context *ev)
1036 if (ev->wakeup_fde == NULL) {
1037 return ENOTCONN;
1040 return tevent_common_wakeup_fd(ev->wakeup_fd);
1043 static void tevent_common_wakeup_fini(struct tevent_context *ev)
1045 if (ev->wakeup_fde == NULL) {
1046 return;
1049 TALLOC_FREE(ev->wakeup_fde);
1051 close(ev->wakeup_fd);
1052 #ifndef HAVE_EVENTFD
1053 close(ev->wakeup_read_fd);
1054 #endif