/*
 * lib/tevent/tevent_threads.c
 * (blob 8197323af020e43444415da0f3d42191e8590452)
 */
/*
   tevent event library.

   Copyright (C) Jeremy Allison 2015

   ** NOTE! The following LGPL license applies to the tevent
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
24 #include "replace.h"
25 #include "system/filesys.h"
26 #include "talloc.h"
27 #include "tevent.h"
28 #include "tevent_internal.h"
29 #include "tevent_util.h"
31 #if defined(HAVE_PTHREAD)
32 #include <pthread.h>
/*
 * One queued cross-thread immediate event. Entries live on either
 * tevent_thread_proxy::im_list (pending) or ::tofree_im_list (done).
 */
struct tevent_immediate_list {
	struct tevent_immediate_list *next, *prev;
	/* Handler to invoke in the destination event context. */
	tevent_immediate_handler_t handler;
	/* Immediate event; ownership was moved here from the caller. */
	struct tevent_immediate *im;
	/* Opaque argument for handler; ownership moved here as well. */
	void *private_ptr;
};
/*
 * Proxy object allowing arbitrary threads to schedule immediate
 * events onto dest_ev_ctx. All mutable state is protected by mutex;
 * the pipe (read_fd/write_fd) is used only to wake the destination
 * event loop.
 */
struct tevent_thread_proxy {
	pthread_mutex_t mutex;
	struct tevent_context *dest_ev_ctx;
	int read_fd;
	int write_fd;
	struct tevent_fd *pipe_read_fde;
	/* Pending events list. */
	struct tevent_immediate_list *im_list;
	/* Completed events list. */
	struct tevent_immediate_list *tofree_im_list;
	struct tevent_immediate *free_im;
};
54 static void free_im_list(struct tevent_immediate_list **pp_list_head)
56 struct tevent_immediate_list *im_entry = NULL;
57 struct tevent_immediate_list *im_next = NULL;
59 for (im_entry = *pp_list_head; im_entry; im_entry = im_next) {
60 im_next = im_entry->next;
61 DLIST_REMOVE(*pp_list_head, im_entry);
62 TALLOC_FREE(im_entry);
/*
 * Immediate handler running in the destination event context.
 * Reclaims the entries of already-delivered events (tofree_im_list)
 * under the proxy mutex.
 */
static void free_list_handler(struct tevent_context *ev,
			      struct tevent_immediate *im,
			      void *private_ptr)
{
	struct tevent_thread_proxy *tp =
		talloc_get_type_abort(private_ptr, struct tevent_thread_proxy);
	int ret;

	ret = pthread_mutex_lock(&tp->mutex);
	if (ret != 0) {
		abort();
		/* Notreached. */
		return;
	}

	free_im_list(&tp->tofree_im_list);

	ret = pthread_mutex_unlock(&tp->mutex);
	if (ret != 0) {
		abort();
		/* Notreached. */
		return;
	}
}
/*
 * Move every pending entry from tp->im_list onto the destination
 * event context as a real immediate event, then schedule
 * free_list_handler to reclaim the entries once they have run.
 * Caller must hold tp->mutex.
 */
static void schedule_immediate_functions(struct tevent_thread_proxy *tp)
{
	struct tevent_immediate_list *im_entry = NULL;
	struct tevent_immediate_list *im_next = NULL;

	for (im_entry = tp->im_list; im_entry; im_entry = im_next) {
		im_next = im_entry->next;
		DLIST_REMOVE(tp->im_list, im_entry);

		tevent_schedule_immediate(im_entry->im,
					tp->dest_ev_ctx,
					im_entry->handler,
					im_entry->private_ptr);

		/* Move from pending list to free list. */
		DLIST_ADD(tp->tofree_im_list, im_entry);
	}
	if (tp->tofree_im_list != NULL) {
		/*
		 * Once the current immediate events
		 * are processed, we need to reschedule
		 * ourselves to free them. This works
		 * as tevent_schedule_immediate()
		 * always adds events to the *END* of
		 * the immediate events list.
		 */
		tevent_schedule_immediate(tp->free_im,
					tp->dest_ev_ctx,
					free_list_handler,
					tp);
	}
}
/*
 * Fd handler on the proxy's wakeup pipe, running in the destination
 * event context. Drains the pipe, then dispatches all queued
 * cross-thread events while holding the proxy mutex.
 */
static void pipe_read_handler(struct tevent_context *ev,
			      struct tevent_fd *fde,
			      uint16_t flags,
			      void *private_ptr)
{
	struct tevent_thread_proxy *tp =
		talloc_get_type_abort(private_ptr, struct tevent_thread_proxy);
	ssize_t len = 64;
	int ret;

	ret = pthread_mutex_lock(&tp->mutex);
	if (ret != 0) {
		abort();
		/* Notreached. */
		return;
	}

	/*
	 * Clear out all data in the pipe. We
	 * don't really care if this returns -1.
	 */
	while (len == 64) {
		char buf[64];
		len = read(tp->read_fd, buf, 64);
	}

	schedule_immediate_functions(tp);

	ret = pthread_mutex_unlock(&tp->mutex);
	if (ret != 0) {
		abort();
		/* Notreached. */
		return;
	}
}
/*
 * Talloc destructor for the proxy: tear down the pipe and fd event,
 * free any remaining queued entries, and destroy the mutex.
 * The write_fd = -1 store (under the mutex) is what makes
 * tevent_thread_proxy_schedule() notice the proxy is going away.
 */
static int tevent_thread_proxy_destructor(struct tevent_thread_proxy *tp)
{
	int ret;

	ret = pthread_mutex_lock(&tp->mutex);
	if (ret != 0) {
		abort();
		/* Notreached. */
		return 0;
	}

	TALLOC_FREE(tp->pipe_read_fde);

	if (tp->read_fd != -1) {
		(void)close(tp->read_fd);
		tp->read_fd = -1;
	}
	if (tp->write_fd != -1) {
		(void)close(tp->write_fd);
		tp->write_fd = -1;
	}

	/* Hmmm. It's probably an error if we get here with
	   any non-NULL immediate entries.. */

	free_im_list(&tp->im_list);
	free_im_list(&tp->tofree_im_list);

	TALLOC_FREE(tp->free_im);

	ret = pthread_mutex_unlock(&tp->mutex);
	if (ret != 0) {
		abort();
		/* Notreached. */
		return 0;
	}

	ret = pthread_mutex_destroy(&tp->mutex);
	if (ret != 0) {
		abort();
		/* Notreached. */
		return 0;
	}

	return 0;
}
/*
 * Create a struct that can be passed to other threads
 * to allow them to signal the struct tevent_context *
 * passed in.
 */

struct tevent_thread_proxy *tevent_thread_proxy_create(
		struct tevent_context *dest_ev_ctx)
{
	int ret;
	int pipefds[2];
	struct tevent_thread_proxy *tp;

	tp = talloc_zero(dest_ev_ctx, struct tevent_thread_proxy);
	if (tp == NULL) {
		return NULL;
	}

	ret = pthread_mutex_init(&tp->mutex, NULL);
	if (ret != 0) {
		goto fail;
	}

	tp->dest_ev_ctx = dest_ev_ctx;
	/* Mark fds invalid until the pipe exists, so the destructor
	   (installed next) doesn't close random descriptors on failure. */
	tp->read_fd = -1;
	tp->write_fd = -1;

	talloc_set_destructor(tp, tevent_thread_proxy_destructor);

	ret = pipe(pipefds);
	if (ret == -1) {
		goto fail;
	}

	tp->read_fd = pipefds[0];
	tp->write_fd = pipefds[1];

	/* Both ends non-blocking and close-on-exec. */
	ret = ev_set_blocking(pipefds[0], false);
	if (ret != 0) {
		goto fail;
	}
	ret = ev_set_blocking(pipefds[1], false);
	if (ret != 0) {
		goto fail;
	}
	if (!ev_set_close_on_exec(pipefds[0])) {
		goto fail;
	}
	if (!ev_set_close_on_exec(pipefds[1])) {
		goto fail;
	}

	tp->pipe_read_fde = tevent_add_fd(dest_ev_ctx,
				tp,
				tp->read_fd,
				TEVENT_FD_READ,
				pipe_read_handler,
				tp);
	if (tp->pipe_read_fde == NULL) {
		goto fail;
	}

	/*
	 * Create an immediate event to free
	 * completed lists.
	 */
	tp->free_im = tevent_create_immediate(tp);
	if (tp->free_im == NULL) {
		goto fail;
	}

	return tp;

  fail:

	TALLOC_FREE(tp);
	return NULL;
}
/*
 * This function schedules an immediate event to be called with argument
 * *pp_private in the thread context of dest_ev_ctx. Caller doesn't
 * wait for activation to take place, this is simply fire-and-forget.
 *
 * pp_im must be a pointer to an immediate event talloced on
 * a context owned by the calling thread, or the NULL context.
 * Ownership of *pp_im will be transferred to the tevent library.
 *
 * pp_private can be null, or contents of *pp_private must be
 * talloc'ed memory on a context owned by the calling thread
 * or the NULL context. If non-null, ownership of *pp_private will
 * be transferred to the tevent library.
 *
 * If you want to return a message, have the destination use the
 * same function call to send back to the caller.
 */

void tevent_thread_proxy_schedule(struct tevent_thread_proxy *tp,
				  struct tevent_immediate **pp_im,
				  tevent_immediate_handler_t handler,
				  void *pp_private_data)
{
	struct tevent_immediate_list *im_entry;
	int ret;
	char c;
	ssize_t written;

	ret = pthread_mutex_lock(&tp->mutex);
	if (ret != 0) {
		abort();
		/* Notreached. */
		return;
	}

	if (tp->write_fd == -1) {
		/* In the process of being destroyed. Ignore. */
		goto end;
	}

	/* Create a new immediate_list entry. MUST BE ON THE NULL CONTEXT */
	im_entry = talloc_zero(NULL, struct tevent_immediate_list);
	if (im_entry == NULL) {
		goto end;
	}

	im_entry->handler = handler;
	/* Take ownership of the caller's immediate (and optional data). */
	im_entry->im = talloc_move(im_entry, pp_im);

	if (pp_private_data != NULL) {
		void **pptr = (void **)pp_private_data;
		im_entry->private_ptr = talloc_move(im_entry, pptr);
	}

	DLIST_ADD(tp->im_list, im_entry);

	/* And notify the dest_ev_ctx to wake up. */
	c = '\0';
	do {
		written = write(tp->write_fd, &c, 1);
	} while (written == -1 && errno == EINTR);

  end:

	ret = pthread_mutex_unlock(&tp->mutex);
	if (ret != 0) {
		abort();
		/* Notreached. */
	}
}
357 #else
358 /* !HAVE_PTHREAD */
/* Stub: thread proxies are unsupported without pthread. */
struct tevent_thread_proxy *tevent_thread_proxy_create(
		struct tevent_context *dest_ev_ctx)
{
	errno = ENOSYS;
	return NULL;
}
/* Stub: silently does nothing without pthread support. */
void tevent_thread_proxy_schedule(struct tevent_thread_proxy *tp,
				  struct tevent_immediate **pp_im,
				  tevent_immediate_handler_t handler,
				  void *pp_private_data)
{
	;
}
373 #endif
375 static int tevent_threaded_context_destructor(
376 struct tevent_threaded_context *tctx)
378 int ret;
380 if (tctx->event_ctx != NULL) {
381 DLIST_REMOVE(tctx->event_ctx->threaded_contexts, tctx);
384 ret = pthread_mutex_destroy(&tctx->event_ctx_mutex);
385 if (ret != 0) {
386 abort();
389 return 0;
/*
 * Create a handle that helper threads can use to schedule immediate
 * events on ev via _tevent_threaded_schedule_immediate(). Returns
 * NULL (with errno set) on failure, or always NULL/ENOSYS without
 * pthread support.
 */
struct tevent_threaded_context *tevent_threaded_context_create(
	TALLOC_CTX *mem_ctx, struct tevent_context *ev)
{
#ifdef HAVE_PTHREAD
	struct tevent_threaded_context *tctx;
	int ret;

	/* Make sure ev has a wakeup fd helper threads can poke. */
	ret = tevent_common_wakeup_init(ev);
	if (ret != 0) {
		errno = ret;
		return NULL;
	}

	tctx = talloc(mem_ctx, struct tevent_threaded_context);
	if (tctx == NULL) {
		return NULL;
	}
	tctx->event_ctx = ev;
	/* Cache the fd so helpers don't have to dereference ev. */
	tctx->wakeup_fd = ev->wakeup_fd;

	ret = pthread_mutex_init(&tctx->event_ctx_mutex, NULL);
	if (ret != 0) {
		TALLOC_FREE(tctx);
		return NULL;
	}

	DLIST_ADD(ev->threaded_contexts, tctx);
	talloc_set_destructor(tctx, tevent_threaded_context_destructor);

	return tctx;
#else
	errno = ENOSYS;
	return NULL;
#endif
}
/*
 * Called from a helper thread: queue im onto the event context's
 * scheduled_immediates list (under scheduled_mutex) and wake up the
 * main thread. event_ctx_mutex guards against the event context
 * having been torn down concurrently.
 */
void _tevent_threaded_schedule_immediate(struct tevent_threaded_context *tctx,
					 struct tevent_immediate *im,
					 tevent_immediate_handler_t handler,
					 void *private_data,
					 const char *handler_name,
					 const char *location)
{
#ifdef HAVE_PTHREAD
	struct tevent_context *ev;
	int ret;

	ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
	if (ret != 0) {
		abort();
	}

	ev = tctx->event_ctx;

	ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
	if (ret != 0) {
		abort();
	}

	if (ev == NULL) {
		/*
		 * Our event context is already gone.
		 */
		return;
	}

	/* Double-scheduling an immediate, or no handler, is a caller bug. */
	if ((im->event_ctx != NULL) || (handler == NULL)) {
		abort();
	}

	im->event_ctx = ev;
	im->handler = handler;
	im->private_data = private_data;
	im->handler_name = handler_name;
	im->schedule_location = location;
	im->cancel_fn = NULL;
	im->additional_data = NULL;

	ret = pthread_mutex_lock(&ev->scheduled_mutex);
	if (ret != 0) {
		abort();
	}

	DLIST_ADD_END(ev->scheduled_immediates, im);

	ret = pthread_mutex_unlock(&ev->scheduled_mutex);
	if (ret != 0) {
		abort();
	}

	/*
	 * We might want to wake up the main thread under the lock. We
	 * had a slightly similar situation in pthreadpool, changed
	 * with 1c4284c7395f23. This is not exactly the same, as the
	 * wakeup is only a last-resort thing in case the main thread
	 * is sleeping. Doing the wakeup under the lock can easily
	 * lead to a contended mutex, which is much more expensive
	 * than a noncontended one. So I'd opt for the lower footprint
	 * initially. Maybe we have to change that later.
	 */
	tevent_common_wakeup_fd(tctx->wakeup_fd);
#else
	/*
	 * tevent_threaded_context_create() returned NULL with ENOSYS...
	 */
	abort();
#endif
}
/*
 * Called by the main event loop: drain the thread-scheduled
 * immediates (under scheduled_mutex) onto the regular immediate
 * events list, preserving FIFO order.
 */
void tevent_common_threaded_activate_immediate(struct tevent_context *ev)
{
#ifdef HAVE_PTHREAD
	int ret;
	ret = pthread_mutex_lock(&ev->scheduled_mutex);
	if (ret != 0) {
		abort();
	}

	while (ev->scheduled_immediates != NULL) {
		struct tevent_immediate *im = ev->scheduled_immediates;
		DLIST_REMOVE(ev->scheduled_immediates, im);
		DLIST_ADD_END(ev->immediate_events, im);
	}

	ret = pthread_mutex_unlock(&ev->scheduled_mutex);
	if (ret != 0) {
		abort();
	}
#else
	/*
	 * tevent_threaded_context_create() returned NULL with ENOSYS...
	 */
	abort();
#endif
}