lib/pthreadpool/pthreadpool_tevent.c

/*
 * Unix SMB/CIFS implementation.
 * threadpool implementation based on pthreads
 * Copyright (C) Volker Lendecke 2009,2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "replace.h"
#include "system/select.h"
#include "system/threads.h"
#include "system/filesys.h"
#include "pthreadpool_tevent.h"
#include "pthreadpool.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/dlinklist.h"
#include "lib/util/attr.h"

/*
 * We try to give some hints to helgrind/drd.
 *
 * Note ANNOTATE_BENIGN_RACE_SIZED(address, size, description)
 * takes a memory address range that is ignored by helgrind/drd;
 * 'description' is just ignored...
 *
 * Note that ANNOTATE_HAPPENS_*(unique_uintptr)
 * just takes a DWORD/(void *) as unique key
 * for the barrier.
 */
#ifdef HAVE_VALGRIND_HELGRIND_H
#include <valgrind/helgrind.h>
#endif
#ifndef ANNOTATE_BENIGN_RACE_SIZED
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description)
#endif
#ifndef ANNOTATE_HAPPENS_BEFORE
#define ANNOTATE_HAPPENS_BEFORE(unique_uintptr)
#endif
#ifndef ANNOTATE_HAPPENS_AFTER
#define ANNOTATE_HAPPENS_AFTER(unique_uintptr)
#endif
#ifndef ANNOTATE_HAPPENS_BEFORE_FORGET_ALL
#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(unique_uintptr)
#endif

#define PTHREAD_TEVENT_JOB_THREAD_FENCE_INIT(__job) do { \
        _UNUSED_ const struct pthreadpool_tevent_job *__j = __job; \
        ANNOTATE_BENIGN_RACE_SIZED(&__j->needs_fence, \
                                   sizeof(__j->needs_fence), \
                                   "race by design, protected by fence"); \
} while(0);

#ifdef WITH_PTHREADPOOL
/*
 * configure checked we have pthread and atomic_thread_fence() available
 */
#define __PTHREAD_TEVENT_JOB_THREAD_FENCE(__order) do { \
        atomic_thread_fence(__order); \
} while(0)
#else
/*
 * we're using lib/pthreadpool/pthreadpool_sync.c ...
 */
#define __PTHREAD_TEVENT_JOB_THREAD_FENCE(__order) do { } while(0)
#ifndef HAVE___THREAD
#define __thread
#endif
#endif

#define PTHREAD_TEVENT_JOB_THREAD_FENCE(__job) do { \
        _UNUSED_ const struct pthreadpool_tevent_job *__j = __job; \
        ANNOTATE_HAPPENS_BEFORE(&__job->needs_fence); \
        __PTHREAD_TEVENT_JOB_THREAD_FENCE(memory_order_seq_cst); \
        ANNOTATE_HAPPENS_AFTER(&__job->needs_fence); \
} while(0);

#define PTHREAD_TEVENT_JOB_THREAD_FENCE_FINI(__job) do { \
        _UNUSED_ const struct pthreadpool_tevent_job *__j = __job; \
        ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&__job->needs_fence); \
} while(0);

struct pthreadpool_tevent_job_state;

/*
 * We need one pthreadpool_tevent_glue object per unique combination of tevent
 * contexts and pthreadpool_tevent objects. Maintain a list of used tevent
 * contexts in a pthreadpool_tevent.
 */
struct pthreadpool_tevent_glue {
        struct pthreadpool_tevent_glue *prev, *next;
        struct pthreadpool_tevent *pool; /* back-pointer to owning object. */
        /* Tuple we are keeping track of in this list. */
        struct tevent_context *ev;
        struct tevent_threaded_context *tctx;
        /* recheck monitor fd event */
        struct tevent_fd *fde;
        /* Pointer to link object owned by *ev. */
        struct pthreadpool_tevent_glue_ev_link *ev_link;
        /* active jobs */
        struct pthreadpool_tevent_job_state *states;
};

/*
 * The pthreadpool_tevent_glue_ev_link and its destructor ensure we remove the
 * tevent context from our list of active event contexts if the event context
 * is destroyed.
 * This structure is talloc()'ed from the struct tevent_context *, and is a
 * back-pointer allowing the related struct pthreadpool_tevent_glue object
 * to be removed from the struct pthreadpool_tevent glue list if the owning
 * tevent_context is talloc_free()'ed.
 */
struct pthreadpool_tevent_glue_ev_link {
        struct pthreadpool_tevent_glue *glue;
};

struct pthreadpool_tevent_wrapper {
        struct pthreadpool_tevent *main_tp;
        struct pthreadpool_tevent *wrap_tp;
        const struct pthreadpool_tevent_wrapper_ops *ops;
        void *private_state;
        bool force_per_thread_cwd;
};

struct pthreadpool_tevent {
        struct pthreadpool_tevent *prev, *next;

        struct pthreadpool *pool;
        struct pthreadpool_tevent_glue *glue_list;

        struct pthreadpool_tevent_job *jobs;

        struct {
                /*
                 * This is used on the main context
                 */
                struct pthreadpool_tevent *list;

                /*
                 * This is used on the wrapper context
                 */
                struct pthreadpool_tevent_wrapper *ctx;
        } wrapper;
};

struct pthreadpool_tevent_job_state {
        struct pthreadpool_tevent_job_state *prev, *next;
        struct pthreadpool_tevent_glue *glue;
        struct tevent_context *ev;
        struct tevent_req *req;
        struct pthreadpool_tevent_job *job;
};

struct pthreadpool_tevent_job {
        struct pthreadpool_tevent_job *prev, *next;

        struct pthreadpool_tevent *pool;
        struct pthreadpool_tevent_wrapper *wrapper;
        struct pthreadpool_tevent_job_state *state;
        struct tevent_immediate *im;

        void (*fn)(void *private_data);
        void *private_data;

        /*
         * Coordination between threads
         *
         * There is only one side writing each element,
         * either the main process or the job thread.
         *
         * The coordination is done by a full memory
         * barrier using atomic_thread_fence(memory_order_seq_cst)
         * wrapped in PTHREAD_TEVENT_JOB_THREAD_FENCE()
         */
        struct {
                /*
                 * 'maycancel'
                 * set when tevent_req_cancel() is called.
                 * (only written by main thread!)
                 */
                bool maycancel;

                /*
                 * 'orphaned'
                 * set when talloc_free is called on the job request,
                 * tevent_context or pthreadpool_tevent.
                 * (only written by main thread!)
                 */
                bool orphaned;

                /*
                 * 'started'
                 * set when the job is picked up by a worker thread
                 * (only written by job thread!)
                 */
                bool started;

                /*
                 * 'wrapper'
                 * set before calling the wrapper before_job() or
                 * after_job() hooks.
                 * unset again once the hook has finished.
                 * (only written by job thread!)
                 */
                bool wrapper;

                /*
                 * 'executed'
                 * set once the job function returned.
                 * (only written by job thread!)
                 */
                bool executed;

                /*
                 * 'finished'
                 * set when pthreadpool_tevent_job_signal() is entered
                 * (only written by job thread!)
                 */
                bool finished;

                /*
                 * 'dropped'
                 * set when pthreadpool_tevent_job_signal() leaves with
                 * orphaned already set.
                 * (only written by job thread!)
                 */
                bool dropped;

                /*
                 * 'signaled'
                 * set when pthreadpool_tevent_job_signal() leaves normally
                 * and the immediate event was scheduled.
                 * (only written by job thread!)
                 */
                bool signaled;

                /*
                 * 'exit_thread'
                 * maybe set during pthreadpool_tevent_job_fn()
                 * if some wrapper related code generated an error
                 * and the environment isn't safe anymore.
                 *
                 * In such a case pthreadpool_tevent_job_signal()
                 * will pick this up and terminate the current
                 * worker thread by returning -1.
                 */
                bool exit_thread; /* only written/read by job thread! */
        } needs_fence;

        bool per_thread_cwd;
};
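
/*
 * Illustration only (not used by the code): every needs_fence flag above is
 * written by exactly one side and published with the following pattern,
 * which is the pattern used throughout the functions below. Assuming a
 * flag written by the job thread and read by the main thread:
 *
 *	job thread (writer):
 *		job->needs_fence.executed = true;
 *		PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
 *
 *	main thread (reader):
 *		PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
 *		if (job->needs_fence.executed) {
 *			...
 *		}
 *
 * i.e. a full memory barrier on both sides, instead of a mutex.
 */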

static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool);

static void pthreadpool_tevent_job_orphan(struct pthreadpool_tevent_job *job);

static struct pthreadpool_tevent_job *orphaned_jobs;

void pthreadpool_tevent_cleanup_orphaned_jobs(void)
{
        struct pthreadpool_tevent_job *job = NULL;
        struct pthreadpool_tevent_job *njob = NULL;

        for (job = orphaned_jobs; job != NULL; job = njob) {
                njob = job->next;

                /*
                 * The job destructor keeps the job alive
                 * (and in the list) or removes it from the list.
                 */
                TALLOC_FREE(job);
        }
}

static int pthreadpool_tevent_job_signal(int jobid,
                                         void (*job_fn)(void *private_data),
                                         void *job_private_data,
                                         void *private_data);

int pthreadpool_tevent_init(TALLOC_CTX *mem_ctx, unsigned max_threads,
                            struct pthreadpool_tevent **presult)
{
        struct pthreadpool_tevent *pool;
        int ret;

        pthreadpool_tevent_cleanup_orphaned_jobs();

        pool = talloc_zero(mem_ctx, struct pthreadpool_tevent);
        if (pool == NULL) {
                return ENOMEM;
        }

        ret = pthreadpool_init(max_threads, &pool->pool,
                               pthreadpool_tevent_job_signal, pool);
        if (ret != 0) {
                TALLOC_FREE(pool);
                return ret;
        }

        talloc_set_destructor(pool, pthreadpool_tevent_destructor);

        *presult = pool;
        return 0;
}
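
/*
 * Example (sketch only, not part of the implementation): creating and
 * releasing a pool. "mem_ctx" and the thread count are assumptions of
 * the caller.
 *
 *	struct pthreadpool_tevent *pool = NULL;
 *	int ret;
 *
 *	ret = pthreadpool_tevent_init(mem_ctx, 4, &pool);
 *	if (ret != 0) {
 *		return ret;	// errno-style code, e.g. ENOMEM
 *	}
 *	...
 *	TALLOC_FREE(pool);	// the destructor orphans pending jobs
 */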

static struct pthreadpool_tevent *pthreadpool_tevent_unwrap(
        struct pthreadpool_tevent *pool)
{
        struct pthreadpool_tevent_wrapper *wrapper = pool->wrapper.ctx;

        if (wrapper != NULL) {
                return wrapper->main_tp;
        }

        return pool;
}

size_t pthreadpool_tevent_max_threads(struct pthreadpool_tevent *pool)
{
        pool = pthreadpool_tevent_unwrap(pool);

        if (pool->pool == NULL) {
                return 0;
        }

        return pthreadpool_max_threads(pool->pool);
}

size_t pthreadpool_tevent_queued_jobs(struct pthreadpool_tevent *pool)
{
        pool = pthreadpool_tevent_unwrap(pool);

        if (pool->pool == NULL) {
                return 0;
        }

        return pthreadpool_queued_jobs(pool->pool);
}

bool pthreadpool_tevent_per_thread_cwd(struct pthreadpool_tevent *pool)
{
        struct pthreadpool_tevent_wrapper *wrapper = pool->wrapper.ctx;

        if (wrapper != NULL && wrapper->force_per_thread_cwd) {
                return true;
        }

        pool = pthreadpool_tevent_unwrap(pool);

        if (pool->pool == NULL) {
                return false;
        }

        return pthreadpool_per_thread_cwd(pool->pool);
}

static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool)
{
        struct pthreadpool_tevent_job *job = NULL;
        struct pthreadpool_tevent_job *njob = NULL;
        struct pthreadpool_tevent *wrap_tp = NULL;
        struct pthreadpool_tevent *nwrap_tp = NULL;
        struct pthreadpool_tevent_glue *glue = NULL;
        int ret;

        if (pool->wrapper.ctx != NULL) {
                struct pthreadpool_tevent_wrapper *wrapper = pool->wrapper.ctx;

                pool->wrapper.ctx = NULL;
                pool = wrapper->main_tp;

                DLIST_REMOVE(pool->wrapper.list, wrapper->wrap_tp);

                for (job = pool->jobs; job != NULL; job = njob) {
                        njob = job->next;

                        if (job->wrapper != wrapper) {
                                continue;
                        }

                        /*
                         * This removes the job from the list.
                         *
                         * Note that it waits in case
                         * the wrapper hooks are currently
                         * executing on the job.
                         */
                        pthreadpool_tevent_job_orphan(job);
                }

                /*
                 * At this point we're sure that no job
                 * still references the pthreadpool_tevent_wrapper
                 * structure, so we can free it.
                 */
                TALLOC_FREE(wrapper);

                pthreadpool_tevent_cleanup_orphaned_jobs();
                return 0;
        }

        if (pool->pool == NULL) {
                /*
                 * A dangling wrapper without main_tp.
                 */
                return 0;
        }

        ret = pthreadpool_stop(pool->pool);
        if (ret != 0) {
                return ret;
        }

        /*
         * orphan all jobs (including wrapper jobs)
         */
        for (job = pool->jobs; job != NULL; job = njob) {
                njob = job->next;

                /*
                 * This removes the job from the list.
                 *
                 * Note that it waits in case
                 * the wrapper hooks are currently
                 * executing on the job (thread).
                 */
                pthreadpool_tevent_job_orphan(job);
        }

        /*
         * Cleanup all existing wrappers; remember we just orphaned
         * all jobs (including the ones of the wrappers).
         *
         * So we just mark them as broken, so that
         * pthreadpool_tevent_job_send() won't accept new jobs.
         */
        for (wrap_tp = pool->wrapper.list; wrap_tp != NULL; wrap_tp = nwrap_tp) {
                nwrap_tp = wrap_tp->next;

                /*
                 * Just mark them as broken, so that we can't
                 * get more jobs.
                 */
                TALLOC_FREE(wrap_tp->wrapper.ctx);

                DLIST_REMOVE(pool->wrapper.list, wrap_tp);
        }

        /*
         * Delete all the registered
         * tevent_context/tevent_threaded_context
         * pairs.
         */
        for (glue = pool->glue_list; glue != NULL; glue = pool->glue_list) {
                /* The glue destructor removes it from the list */
                TALLOC_FREE(glue);
        }
        pool->glue_list = NULL;

        ret = pthreadpool_destroy(pool->pool);
        if (ret != 0) {
                return ret;
        }
        pool->pool = NULL;

        pthreadpool_tevent_cleanup_orphaned_jobs();

        return 0;
}

struct pthreadpool_tevent *_pthreadpool_tevent_wrapper_create(
        struct pthreadpool_tevent *main_tp,
        TALLOC_CTX *mem_ctx,
        const struct pthreadpool_tevent_wrapper_ops *ops,
        void *pstate,
        size_t psize,
        const char *type,
        const char *location)
{
        void **ppstate = (void **)pstate;
        struct pthreadpool_tevent *wrap_tp = NULL;
        struct pthreadpool_tevent_wrapper *wrapper = NULL;

        pthreadpool_tevent_cleanup_orphaned_jobs();

        if (main_tp->wrapper.ctx != NULL) {
                /*
                 * stacking of wrappers is not supported
                 */
                errno = EINVAL;
                return NULL;
        }

        if (main_tp->pool == NULL) {
                /*
                 * The pool is no longer valid,
                 * most likely it was a wrapper context
                 * where the main pool was destroyed.
                 */
                errno = EINVAL;
                return NULL;
        }

        wrap_tp = talloc_zero(mem_ctx, struct pthreadpool_tevent);
        if (wrap_tp == NULL) {
                return NULL;
        }

        wrapper = talloc_zero(wrap_tp, struct pthreadpool_tevent_wrapper);
        if (wrapper == NULL) {
                TALLOC_FREE(wrap_tp);
                return NULL;
        }
        wrapper->main_tp = main_tp;
        wrapper->wrap_tp = wrap_tp;
        wrapper->ops = ops;
        wrapper->private_state = talloc_zero_size(wrapper, psize);
        if (wrapper->private_state == NULL) {
                TALLOC_FREE(wrap_tp);
                return NULL;
        }
        talloc_set_name_const(wrapper->private_state, type);

        wrap_tp->wrapper.ctx = wrapper;

        DLIST_ADD_END(main_tp->wrapper.list, wrap_tp);

        talloc_set_destructor(wrap_tp, pthreadpool_tevent_destructor);

        *ppstate = wrapper->private_state;
        return wrap_tp;
}
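
/*
 * Example (sketch only): how a wrapper pool is typically used. The full
 * declaration of struct pthreadpool_tevent_wrapper_ops lives in
 * pthreadpool_tevent.h; only the before_job()/after_job() hook shape
 * that pthreadpool_tevent_job_fn() below relies on is shown here, and
 * "my_before_job" is a hypothetical name.
 *
 *	static bool my_before_job(struct pthreadpool_tevent *wrap_tp,
 *				  void *private_state,
 *				  struct pthreadpool_tevent *main_tp,
 *				  const char *location)
 *	{
 *		// e.g. switch credentials or cwd for this worker thread
 *		return true;	// returning false terminates the worker
 *	}
 *
 * after_job() has the same signature and undoes the change. The wrapper
 * pool returned here is then passed to pthreadpool_tevent_job_send()
 * instead of main_tp.
 */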

void pthreadpool_tevent_force_per_thread_cwd(struct pthreadpool_tevent *pool,
                                             const void *private_state)
{
        struct pthreadpool_tevent_wrapper *wrapper = pool->wrapper.ctx;

        if (wrapper == NULL) {
                abort();
        }

        if (wrapper->private_state != private_state) {
                abort();
        }

        wrapper->force_per_thread_cwd = true;
}

static int pthreadpool_tevent_glue_destructor(
        struct pthreadpool_tevent_glue *glue)
{
        struct pthreadpool_tevent_job_state *state = NULL;
        struct pthreadpool_tevent_job_state *nstate = NULL;

        TALLOC_FREE(glue->fde);

        for (state = glue->states; state != NULL; state = nstate) {
                nstate = state->next;

                /* This removes the job from the list */
                pthreadpool_tevent_job_orphan(state->job);
        }

        if (glue->pool->glue_list != NULL) {
                DLIST_REMOVE(glue->pool->glue_list, glue);
        }

        /* Ensure the ev_link destructor knows we're gone */
        glue->ev_link->glue = NULL;

        TALLOC_FREE(glue->ev_link);
        TALLOC_FREE(glue->tctx);

        return 0;
}

/*
 * Destructor called either explicitly from
 * pthreadpool_tevent_glue_destructor(), or indirectly
 * when the owning tevent_context is destroyed.
 *
 * When called from pthreadpool_tevent_glue_destructor()
 * ev_link->glue is already NULL, so this does nothing.
 *
 * When called from talloc_free() of the owning
 * tevent_context we must ensure we also remove the
 * linked glue object from the list inside
 * struct pthreadpool_tevent.
 */
static int pthreadpool_tevent_glue_link_destructor(
        struct pthreadpool_tevent_glue_ev_link *ev_link)
{
        TALLOC_FREE(ev_link->glue);
        return 0;
}

static void pthreadpool_tevent_glue_monitor(struct tevent_context *ev,
                                            struct tevent_fd *fde,
                                            uint16_t flags,
                                            void *private_data)
{
        struct pthreadpool_tevent_glue *glue =
                talloc_get_type_abort(private_data,
                struct pthreadpool_tevent_glue);
        struct pthreadpool_tevent_job *job = NULL;
        struct pthreadpool_tevent_job *njob = NULL;
        int ret = -1;

        ret = pthreadpool_restart_check_monitor_drain(glue->pool->pool);
        if (ret != 0) {
                TALLOC_FREE(glue->fde);
        }

        ret = pthreadpool_restart_check(glue->pool->pool);
        if (ret == 0) {
                /*
                 * success...
                 */
                goto done;
        }

        /*
         * There's a problem and the pool does not have
         * a single thread available for pending jobs,
         * so we can only stop the jobs and return an error.
         * This is similar to a failure from
         * pthreadpool_add_job().
         */
        for (job = glue->pool->jobs; job != NULL; job = njob) {
                njob = job->next;

                tevent_req_defer_callback(job->state->req,
                                          job->state->ev);
                tevent_req_error(job->state->req, ret);
        }

done:
        if (glue->states == NULL) {
                /*
                 * If the glue doesn't have any pending jobs
                 * we remove the glue, in order to remove
                 * the fd event.
                 */
                TALLOC_FREE(glue);
        }
}

static int pthreadpool_tevent_register_ev(
        struct pthreadpool_tevent *pool,
        struct pthreadpool_tevent_job_state *state)
{
        struct tevent_context *ev = state->ev;
        struct pthreadpool_tevent_glue *glue = NULL;
        struct pthreadpool_tevent_glue_ev_link *ev_link = NULL;
        int monitor_fd = -1;

        /*
         * See if this tevent_context was already registered by
         * searching the glue object list. If so we have nothing
         * to do here - we already have a tevent_context/tevent_threaded_context
         * pair.
         */
        for (glue = pool->glue_list; glue != NULL; glue = glue->next) {
                if (glue->ev == state->ev) {
                        state->glue = glue;
                        DLIST_ADD_END(glue->states, state);
                        return 0;
                }
        }

        /*
         * Event context not yet registered - create a new glue
         * object containing a tevent_context/tevent_threaded_context
         * pair and put it on the list to remember this registration.
         * We also need a link object to ensure the event context
         * can't go away without us knowing about it.
         */
        glue = talloc_zero(pool, struct pthreadpool_tevent_glue);
        if (glue == NULL) {
                return ENOMEM;
        }
        *glue = (struct pthreadpool_tevent_glue) {
                .pool = pool,
                .ev = ev,
        };
        talloc_set_destructor(glue, pthreadpool_tevent_glue_destructor);

        monitor_fd = pthreadpool_restart_check_monitor_fd(pool->pool);
        if (monitor_fd == -1 && errno != ENOSYS) {
                int saved_errno = errno;
                TALLOC_FREE(glue);
                return saved_errno;
        }

        if (monitor_fd != -1) {
                glue->fde = tevent_add_fd(ev,
                                          glue,
                                          monitor_fd,
                                          TEVENT_FD_READ,
                                          pthreadpool_tevent_glue_monitor,
                                          glue);
                if (glue->fde == NULL) {
                        close(monitor_fd);
                        TALLOC_FREE(glue);
                        return ENOMEM;
                }
                tevent_fd_set_auto_close(glue->fde);
                monitor_fd = -1;
        }

        /*
         * Now allocate the link object to the event context. Note this
         * is allocated OFF THE EVENT CONTEXT ITSELF, so if the event
         * context is freed we are able to cleanup the glue object
         * in the link object destructor.
         */
        ev_link = talloc_zero(ev, struct pthreadpool_tevent_glue_ev_link);
        if (ev_link == NULL) {
                TALLOC_FREE(glue);
                return ENOMEM;
        }
        ev_link->glue = glue;
        talloc_set_destructor(ev_link, pthreadpool_tevent_glue_link_destructor);

        glue->ev_link = ev_link;

#ifdef HAVE_PTHREAD
        glue->tctx = tevent_threaded_context_create(glue, ev);
        if (glue->tctx == NULL) {
                TALLOC_FREE(ev_link);
                TALLOC_FREE(glue);
                return ENOMEM;
        }
#endif

        state->glue = glue;
        DLIST_ADD_END(glue->states, state);

        DLIST_ADD(pool->glue_list, glue);
        return 0;
}

static void pthreadpool_tevent_job_fn(void *private_data);
static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
                                        struct tevent_immediate *im,
                                        void *private_data);
static bool pthreadpool_tevent_job_cancel(struct tevent_req *req);

static int pthreadpool_tevent_job_destructor(struct pthreadpool_tevent_job *job)
{
        /*
         * We should never be called with needs_fence.orphaned == false.
         * Only pthreadpool_tevent_job_orphan() will call TALLOC_FREE(job)
         * after detaching from the request state, glue and pool list.
         */
        if (!job->needs_fence.orphaned) {
                abort();
        }

        /*
         * If the job is not finished (job->im still there)
         * and it's still attached to the pool,
         * we try to cancel it (before it has started).
         */
        if (job->im != NULL && job->pool != NULL) {
                size_t num;

                num = pthreadpool_cancel_job(job->pool->pool, 0,
                                             pthreadpool_tevent_job_fn,
                                             job);
                if (num != 0) {
                        /*
                         * It was not too late to cancel the request.
                         *
                         * We can remove job->im, as it will never be used.
                         */
                        TALLOC_FREE(job->im);
                }
        }

        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        if (job->needs_fence.dropped) {
                /*
                 * The signal function saw job->needs_fence.orphaned
                 * before it started the signaling via the immediate
                 * event. So we'll never get triggered and can
                 * remove job->im and let the whole job go...
                 */
                TALLOC_FREE(job->im);
        }

        /*
         * TODO?: We could further improve this by adjusting
         * tevent_threaded_schedule_immediate_destructor()
         * and allow TALLOC_FREE() during its time
         * in the main_ev->scheduled_immediates list.
         *
         * PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
         * if (state->needs_fence.signaled) {
         *         *
         *         * The signal function is completed
         *         * in future we may be allowed
         *         * to call TALLOC_FREE(job->im).
         *         *
         *        TALLOC_FREE(job->im);
         * }
         */

        /*
         * pthreadpool_tevent_job_orphan() already removed
         * it from pool->jobs. And we don't need to try
         * pthreadpool_cancel_job() again.
         */
        job->pool = NULL;

        if (job->im != NULL) {
                /*
                 * job->im still there means we need to wait for the
                 * immediate event to be triggered or just leak the memory.
                 *
                 * Move it to the orphaned list, if it's not already there.
                 */
                return -1;
        }

        /*
         * Finally remove from the orphaned_jobs list
         * and let talloc destroy us.
         */
        DLIST_REMOVE(orphaned_jobs, job);

        PTHREAD_TEVENT_JOB_THREAD_FENCE_FINI(job);
        return 0;
}

static void pthreadpool_tevent_job_orphan(struct pthreadpool_tevent_job *job)
{
        job->needs_fence.orphaned = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);

        /*
         * We're the only function that sets
         * job->state = NULL;
         */
        if (job->state == NULL) {
                abort();
        }

        /*
         * Once we marked the request as 'orphaned'
         * we spin/loop if 'wrapper' is marked as active.
         *
         * We need to wait until the wrapper hook finished
         * before we can set job->wrapper = NULL.
         *
         * This is some kind of spinlock, but with
         * 1 millisecond sleeps in between, in order
         * to give the thread more cpu time to finish.
         */
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        while (job->needs_fence.wrapper) {
                (void)poll(NULL, 0, 1);
                PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        }
        job->wrapper = NULL;

        /*
         * Once we marked the request as 'orphaned'
         * we spin/loop if it's already marked
         * as 'finished' (which means that
         * pthreadpool_tevent_job_signal() was entered).
         * If it saw 'orphaned' it will exit after setting
         * 'dropped', otherwise it dereferences
         * job->state->glue->{tctx,ev} until it exited
         * after setting 'signaled'.
         *
         * We need to close this potential gap before
         * we can set job->state = NULL.
         *
         * This is some kind of spinlock, but with
         * 1 millisecond sleeps in between, in order
         * to give the thread more cpu time to finish.
         */
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        while (job->needs_fence.finished) {
                if (job->needs_fence.dropped) {
                        break;
                }
                if (job->needs_fence.signaled) {
                        break;
                }
                (void)poll(NULL, 0, 1);
                PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        }

        /*
         * Once the gap is closed, we can remove
         * the glue link.
         */
        DLIST_REMOVE(job->state->glue->states, job->state);
        job->state->glue = NULL;

        /*
         * We need to reparent to a long term context.
         * And detach from the request state.
         * Maybe the destructor will keep the memory
         * and leak it for now.
         */
        (void)talloc_reparent(job->state, NULL, job);
        job->state->job = NULL;
        job->state = NULL;

        /*
         * job->pool will only be set to NULL
         * in the first destructor run.
         */
        if (job->pool == NULL) {
                abort();
        }

        /*
         * Detach it from the pool.
         *
         * The job might still be running,
         * so we keep job->pool.
         * The destructor will set it to NULL
         * after trying pthreadpool_cancel_job().
         */
        DLIST_REMOVE(job->pool->jobs, job);

        /*
         * Add it to the list of orphaned jobs,
         * which may be cleaned up later.
         *
         * The destructor removes it from the list
         * when possible, or it denies the free
         * and keeps it in the list.
         */
        DLIST_ADD_END(orphaned_jobs, job);
        TALLOC_FREE(job);
}

static void pthreadpool_tevent_job_cleanup(struct tevent_req *req,
                                           enum tevent_req_state req_state)
{
        struct pthreadpool_tevent_job_state *state =
                tevent_req_data(req,
                struct pthreadpool_tevent_job_state);

        if (state->job == NULL) {
                /*
                 * The job request is not scheduled in the pool
                 * yet or anymore.
                 */
                if (state->glue != NULL) {
                        DLIST_REMOVE(state->glue->states, state);
                        state->glue = NULL;
                }
                return;
        }

        /*
         * We need to reparent to a long term context.
         * Maybe the destructor will keep the memory
         * and leak it for now.
         */
        pthreadpool_tevent_job_orphan(state->job);
        state->job = NULL; /* not needed but looks better */
        return;
}

struct tevent_req *pthreadpool_tevent_job_send(
        TALLOC_CTX *mem_ctx, struct tevent_context *ev,
        struct pthreadpool_tevent *pool,
        void (*fn)(void *private_data), void *private_data)
{
        struct tevent_req *req = NULL;
        struct pthreadpool_tevent_job_state *state = NULL;
        struct pthreadpool_tevent_job *job = NULL;
        int ret;
        struct pthreadpool_tevent *caller_pool = pool;
        struct pthreadpool_tevent_wrapper *wrapper = pool->wrapper.ctx;

        pthreadpool_tevent_cleanup_orphaned_jobs();

        if (wrapper != NULL) {
                pool = wrapper->main_tp;
        }

        req = tevent_req_create(mem_ctx, &state,
                                struct pthreadpool_tevent_job_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->req = req;

        tevent_req_set_cleanup_fn(req, pthreadpool_tevent_job_cleanup);

        if (pool == NULL) {
                tevent_req_error(req, EINVAL);
                return tevent_req_post(req, ev);
        }
        if (pool->pool == NULL) {
                tevent_req_error(req, EINVAL);
                return tevent_req_post(req, ev);
        }

        ret = pthreadpool_tevent_register_ev(pool, state);
        if (tevent_req_error(req, ret)) {
                return tevent_req_post(req, ev);
        }

        job = talloc_zero(state, struct pthreadpool_tevent_job);
        if (tevent_req_nomem(job, req)) {
                return tevent_req_post(req, ev);
        }
        job->pool = pool;
        job->wrapper = wrapper;
        job->fn = fn;
        job->private_data = private_data;
        job->im = tevent_create_immediate(state->job);
        if (tevent_req_nomem(job->im, req)) {
                return tevent_req_post(req, ev);
        }
        PTHREAD_TEVENT_JOB_THREAD_FENCE_INIT(job);
        job->per_thread_cwd = pthreadpool_tevent_per_thread_cwd(caller_pool);
        talloc_set_destructor(job, pthreadpool_tevent_job_destructor);
        DLIST_ADD_END(job->pool->jobs, job);
        job->state = state;
        state->job = job;

        ret = pthreadpool_add_job(job->pool->pool, 0,
                                  pthreadpool_tevent_job_fn,
                                  job);
        if (tevent_req_error(req, ret)) {
                return tevent_req_post(req, ev);
        }

        tevent_req_set_cancel_fn(req, pthreadpool_tevent_job_cancel);
        return req;
}

static __thread struct pthreadpool_tevent_job *current_job;

bool pthreadpool_tevent_current_job_canceled(void)
{
        if (current_job == NULL) {
                /*
                 * Should only be called from within
                 * the job function.
                 */
                abort();
                return false;
        }

        PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
        return current_job->needs_fence.maycancel;
}

bool pthreadpool_tevent_current_job_orphaned(void)
{
        if (current_job == NULL) {
                /*
                 * Should only be called from within
                 * the job function.
                 */
                abort();
                return false;
        }

        PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
        return current_job->needs_fence.orphaned;
}

bool pthreadpool_tevent_current_job_continue(void)
{
        if (current_job == NULL) {
                /*
                 * Should only be called from within
                 * the job function.
                 */
                abort();
                return false;
        }

        PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
        if (current_job->needs_fence.maycancel) {
                return false;
        }
        PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
        if (current_job->needs_fence.orphaned) {
                return false;
        }

        return true;
}
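
/*
 * Example (sketch only): a long-running job function can poll the
 * helpers above to stop early once the request was cancelled or
 * orphaned. "more_work_left" and "do_one_chunk" are placeholders.
 *
 *	static void my_job_fn(void *private_data)
 *	{
 *		while (more_work_left(private_data)) {
 *			if (!pthreadpool_tevent_current_job_continue()) {
 *				return;
 *			}
 *			do_one_chunk(private_data);
 *		}
 *	}
 */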

bool pthreadpool_tevent_current_job_per_thread_cwd(void)
{
        if (current_job == NULL) {
                /*
                 * Should only be called from within
                 * the job function.
                 */
                abort();
                return false;
        }

        return current_job->per_thread_cwd;
}

static void pthreadpool_tevent_job_fn(void *private_data)
{
        struct pthreadpool_tevent_job *job =
                talloc_get_type_abort(private_data,
                struct pthreadpool_tevent_job);
        struct pthreadpool_tevent_wrapper *wrapper = NULL;

        current_job = job;
        job->needs_fence.started = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        if (job->needs_fence.orphaned) {
                current_job = NULL;
                return;
        }
1144 if (wrapper != NULL) {
1145 bool ok;
1147 job->needs_fence.wrapper = true;
1148 PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
1149 if (job->needs_fence.orphaned) {
1150 job->needs_fence.wrapper = false;
1151 PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
1152 current_job = NULL;
1153 return;
1155 ok = wrapper->ops->before_job(wrapper->wrap_tp,
1156 wrapper->private_state,
1157 wrapper->main_tp,
1158 __location__);
1159 job->needs_fence.wrapper = false;
1160 PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
1161 if (!ok) {
1162 job->needs_fence.exit_thread = true;
1163 PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
1164 current_job = NULL;
1165 return;
1169 job->fn(job->private_data);

        job->needs_fence.executed = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);

        if (wrapper != NULL) {
                bool ok;

                job->needs_fence.wrapper = true;
                PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
                if (job->needs_fence.orphaned) {
                        job->needs_fence.wrapper = false;
                        job->needs_fence.exit_thread = true;
                        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
                        current_job = NULL;
                        return;
                }
                ok = wrapper->ops->after_job(wrapper->wrap_tp,
                                             wrapper->private_state,
                                             wrapper->main_tp,
                                             __location__);
                job->needs_fence.wrapper = false;
                PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
                if (!ok) {
                        job->needs_fence.exit_thread = true;
                        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
                        current_job = NULL;
                        return;
                }
        }

        current_job = NULL;
}

static int pthreadpool_tevent_job_signal(int jobid,
                                         void (*job_fn)(void *private_data),
                                         void *job_private_data,
                                         void *private_data)
{
        struct pthreadpool_tevent_job *job =
                talloc_get_type_abort(job_private_data,
                struct pthreadpool_tevent_job);

        job->needs_fence.finished = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        if (job->needs_fence.orphaned) {
                /* Request already gone */
                job->needs_fence.dropped = true;
                PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
                if (job->needs_fence.exit_thread) {
                        /*
                         * A problem with the wrapper means the current
                         * job/worker thread needs to terminate.
                         *
                         * The pthreadpool_tevent is already gone.
                         */
                        return -1;
                }
                return 0;
        }

        /*
         * state and state->glue are valid,
         * see the job->needs_fence.finished
         * "spinlock" loop in
         * pthreadpool_tevent_job_orphan()
         */
        if (job->state->glue->tctx != NULL) {
                /* with HAVE_PTHREAD */
                tevent_threaded_schedule_immediate(job->state->glue->tctx,
                                                   job->im,
                                                   pthreadpool_tevent_job_done,
                                                   job);
        } else {
                /* without HAVE_PTHREAD */
                tevent_schedule_immediate(job->im,
                                          job->state->glue->ev,
                                          pthreadpool_tevent_job_done,
                                          job);
        }

        job->needs_fence.signaled = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        if (job->needs_fence.exit_thread) {
                /*
                 * A problem with the wrapper means the current
                 * job/worker thread needs to terminate.
                 *
                 * The pthreadpool_tevent is already gone.
                 */
                return -1;
        }
        return 0;
}

static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
                                        struct tevent_immediate *im,
                                        void *private_data)
{
        struct pthreadpool_tevent_job *job =
                talloc_get_type_abort(private_data,
                struct pthreadpool_tevent_job);
        struct pthreadpool_tevent_job_state *state = job->state;

        TALLOC_FREE(job->im);

        if (state == NULL) {
                /* Request already gone */
                TALLOC_FREE(job);
                return;
        }

        /*
         * pthreadpool_tevent_job_cleanup()
         * (called by tevent_req_done() or
         * tevent_req_error()) will destroy the job.
         */

        if (job->needs_fence.executed) {
                tevent_req_done(state->req);
                return;
        }

        tevent_req_error(state->req, ENOEXEC);
        return;
}

static bool pthreadpool_tevent_job_cancel(struct tevent_req *req)
{
        struct pthreadpool_tevent_job_state *state =
                tevent_req_data(req,
                struct pthreadpool_tevent_job_state);
        struct pthreadpool_tevent_job *job = state->job;
        size_t num;

        if (job == NULL) {
                return false;
        }

        job->needs_fence.maycancel = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        if (job->needs_fence.started) {
                /*
                 * It was too late to cancel the request.
                 *
                 * The job still has the chance to look
                 * at pthreadpool_tevent_current_job_canceled()
                 * or pthreadpool_tevent_current_job_continue()
                 */
                return false;
        }

        num = pthreadpool_cancel_job(job->pool->pool, 0,
                                     pthreadpool_tevent_job_fn,
                                     job);
        if (num == 0) {
                /*
                 * It was too late to cancel the request.
                 */
                return false;
        }

        /*
         * It was not too late to cancel the request.
         *
         * We can remove job->im, as it will never be used.
         */
        TALLOC_FREE(job->im);

        /*
         * pthreadpool_tevent_job_cleanup()
         * will destroy the job.
         */
        tevent_req_defer_callback(req, state->ev);
        tevent_req_error(req, ECANCELED);
        return true;
}

int pthreadpool_tevent_job_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_unix(req);
}
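
/*
 * Example (sketch only): typical caller usage, following the standard
 * tevent_req send/recv pattern. "my_blocking_fn", "my_done" and
 * "my_state" are placeholders; my_blocking_fn() runs in a worker
 * thread, my_done() runs in the main tevent loop.
 *
 *	req = pthreadpool_tevent_job_send(my_state, ev, pool,
 *					  my_blocking_fn, my_state);
 *	if (req == NULL) {
 *		return ENOMEM;
 *	}
 *	tevent_req_set_callback(req, my_done, my_state);
 *
 *	// ... and in my_done():
 *	ret = pthreadpool_tevent_job_recv(req);
 *	TALLOC_FREE(req);
 *	if (ret != 0) {
 *		// errno-style error, e.g. ECANCELED or ENOEXEC
 *	}
 */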