/*
 * Unix SMB/CIFS implementation.
 * threadpool implementation based on pthreads
 * Copyright (C) Volker Lendecke 2009,2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "replace.h"
#include "pthreadpool_tevent.h"
#include "pthreadpool.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/dlinklist.h"

struct pthreadpool_tevent_job_state;

/*
 * We need one pthreadpool_tevent_glue object per unique combination of tevent
 * contexts and pthreadpool_tevent objects. Maintain a list of used tevent
 * contexts in a pthreadpool_tevent.
 */
struct pthreadpool_tevent_glue {
	struct pthreadpool_tevent_glue *prev, *next;
	struct pthreadpool_tevent *pool; /* back-pointer to owning object. */
	/* Tuple we are keeping track of in this list. */
	struct tevent_context *ev;
	struct tevent_threaded_context *tctx;
	/* Pointer to link object owned by *ev. */
	struct pthreadpool_tevent_glue_ev_link *ev_link;
};

/*
 * The pthreadpool_tevent_glue_ev_link and its destructor ensure we remove the
 * tevent context from our list of active event contexts if the event context
 * is destroyed.
 * This structure is talloc()'ed from the struct tevent_context *, and is a
 * back-pointer allowing the related struct pthreadpool_tevent_glue object
 * to be removed from the struct pthreadpool_tevent glue list if the owning
 * tevent_context is talloc_free()'ed.
 */
struct pthreadpool_tevent_glue_ev_link {
	struct pthreadpool_tevent_glue *glue;
};

struct pthreadpool_tevent {
	struct pthreadpool *pool;
	struct pthreadpool_tevent_glue *glue_list;

	struct pthreadpool_tevent_job *jobs;
};

struct pthreadpool_tevent_job_state {
	struct tevent_context *ev;
	struct tevent_req *req;
	struct pthreadpool_tevent_job *job;
};

struct pthreadpool_tevent_job {
	struct pthreadpool_tevent_job *prev, *next;

	struct pthreadpool_tevent *pool;
	struct pthreadpool_tevent_job_state *state;
	struct tevent_immediate *im;

	void (*fn)(void *private_data);
	void *private_data;
};

static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool);

static void pthreadpool_tevent_job_orphan(struct pthreadpool_tevent_job *job);
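
/*
 * Global list of orphaned jobs: jobs whose tevent_req and job state
 * went away while the job function may still be running in a worker
 * thread. pthreadpool_tevent_job_orphan() moves such jobs here, and
 * pthreadpool_tevent_cleanup_orphaned_jobs() frees them later, once
 * their destructor no longer denies the free.
 */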
static struct pthreadpool_tevent_job *orphaned_jobs;

void pthreadpool_tevent_cleanup_orphaned_jobs(void)
{
	struct pthreadpool_tevent_job *job = NULL;
	struct pthreadpool_tevent_job *njob = NULL;

	for (job = orphaned_jobs; job != NULL; job = njob) {
		njob = job->next;

		/*
		 * The job destructor keeps the job alive
		 * (and in the list) or removes it from the list.
		 */
		TALLOC_FREE(job);
	}
}

static int pthreadpool_tevent_job_signal(int jobid,
					 void (*job_fn)(void *private_data),
					 void *job_private_data,
					 void *private_data);

int pthreadpool_tevent_init(TALLOC_CTX *mem_ctx, unsigned max_threads,
			    struct pthreadpool_tevent **presult)
{
	struct pthreadpool_tevent *pool;
	int ret;

	pthreadpool_tevent_cleanup_orphaned_jobs();

	pool = talloc_zero(mem_ctx, struct pthreadpool_tevent);
	if (pool == NULL) {
		return ENOMEM;
	}

	ret = pthreadpool_init(max_threads, &pool->pool,
			       pthreadpool_tevent_job_signal, pool);
	if (ret != 0) {
		TALLOC_FREE(pool);
		return ret;
	}

	talloc_set_destructor(pool, pthreadpool_tevent_destructor);

	*presult = pool;
	return 0;
}

size_t pthreadpool_tevent_max_threads(struct pthreadpool_tevent *pool)
{
	if (pool->pool == NULL) {
		return 0;
	}

	return pthreadpool_max_threads(pool->pool);
}

size_t pthreadpool_tevent_queued_jobs(struct pthreadpool_tevent *pool)
{
	if (pool->pool == NULL) {
		return 0;
	}

	return pthreadpool_queued_jobs(pool->pool);
}

static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool)
{
	struct pthreadpool_tevent_job *job = NULL;
	struct pthreadpool_tevent_job *njob = NULL;
	struct pthreadpool_tevent_glue *glue = NULL;
	int ret;

	ret = pthreadpool_stop(pool->pool);
	if (ret != 0) {
		return ret;
	}

	for (job = pool->jobs; job != NULL; job = njob) {
		njob = job->next;

		/* This orphans the job and removes it from pool->jobs */
		pthreadpool_tevent_job_orphan(job);
	}

	/*
	 * Delete all the registered
	 * tevent_context/tevent_threaded_context
	 * pairs.
	 */
	for (glue = pool->glue_list; glue != NULL; glue = pool->glue_list) {
		/* The glue destructor removes it from the list */
		TALLOC_FREE(glue);
	}
	pool->glue_list = NULL;

	ret = pthreadpool_destroy(pool->pool);
	if (ret != 0) {
		return ret;
	}
	pool->pool = NULL;

	pthreadpool_tevent_cleanup_orphaned_jobs();

	return 0;
}

static int pthreadpool_tevent_glue_destructor(
	struct pthreadpool_tevent_glue *glue)
{
	if (glue->pool->glue_list != NULL) {
		DLIST_REMOVE(glue->pool->glue_list, glue);
	}

	/* Ensure the ev_link destructor knows we're gone */
	glue->ev_link->glue = NULL;

	TALLOC_FREE(glue->ev_link);
	TALLOC_FREE(glue->tctx);

	return 0;
}

/*
 * Destructor called either explicitly from
 * pthreadpool_tevent_glue_destructor(), or indirectly
 * when the owning tevent_context is destroyed.
 *
 * When called from pthreadpool_tevent_glue_destructor()
 * ev_link->glue is already NULL, so this does nothing.
 *
 * When called from talloc_free() of the owning
 * tevent_context we must ensure we also remove the
 * linked glue object from the list inside
 * struct pthreadpool_tevent.
 */
static int pthreadpool_tevent_glue_link_destructor(
	struct pthreadpool_tevent_glue_ev_link *ev_link)
{
	TALLOC_FREE(ev_link->glue);
	return 0;
}

static int pthreadpool_tevent_register_ev(struct pthreadpool_tevent *pool,
					  struct tevent_context *ev)
{
	struct pthreadpool_tevent_glue *glue = NULL;
	struct pthreadpool_tevent_glue_ev_link *ev_link = NULL;

	/*
	 * See if this tevent_context was already registered by
	 * searching the glue object list. If so we have nothing
	 * to do here - we already have a
	 * tevent_context/tevent_threaded_context pair.
	 */
	for (glue = pool->glue_list; glue != NULL; glue = glue->next) {
		if (glue->ev == ev) {
			return 0;
		}
	}

	/*
	 * Event context not yet registered - create a new glue
	 * object containing a tevent_context/tevent_threaded_context
	 * pair and put it on the list to remember this registration.
	 * We also need a link object to ensure the event context
	 * can't go away without us knowing about it.
	 */
	glue = talloc_zero(pool, struct pthreadpool_tevent_glue);
	if (glue == NULL) {
		return ENOMEM;
	}
	*glue = (struct pthreadpool_tevent_glue) {
		.pool = pool,
		.ev = ev,
	};
	talloc_set_destructor(glue, pthreadpool_tevent_glue_destructor);

	/*
	 * Now allocate the link object to the event context. Note this
	 * is allocated OFF THE EVENT CONTEXT ITSELF, so if the event
	 * context is freed we are able to cleanup the glue object
	 * in the link object destructor.
	 */
	ev_link = talloc_zero(ev, struct pthreadpool_tevent_glue_ev_link);
	if (ev_link == NULL) {
		TALLOC_FREE(glue);
		return ENOMEM;
	}
	ev_link->glue = glue;
	talloc_set_destructor(ev_link, pthreadpool_tevent_glue_link_destructor);

	glue->ev_link = ev_link;

#ifdef HAVE_PTHREAD
	glue->tctx = tevent_threaded_context_create(pool, ev);
	if (glue->tctx == NULL) {
		TALLOC_FREE(ev_link);
		TALLOC_FREE(glue);
		return ENOMEM;
	}
#endif

	DLIST_ADD(pool->glue_list, glue);
	return 0;
}

static void pthreadpool_tevent_job_fn(void *private_data);
static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
					struct tevent_immediate *im,
					void *private_data);
static bool pthreadpool_tevent_job_cancel(struct tevent_req *req);

static int pthreadpool_tevent_job_destructor(struct pthreadpool_tevent_job *job)
{
	/*
	 * We should never be called with job->state != NULL.
	 * Only pthreadpool_tevent_job_orphan() will call TALLOC_FREE(job)
	 * after detaching from the request state and pool list.
	 */
	if (job->state != NULL) {
		abort();
	}

	/*
	 * If the job is not finished (job->im still there)
	 * and it's still attached to the pool,
	 * we try to cancel it (before it has started).
	 */
	if (job->im != NULL && job->pool != NULL) {
		size_t num;

		num = pthreadpool_cancel_job(job->pool->pool, 0,
					     pthreadpool_tevent_job_fn,
					     job);
		if (num != 0) {
			/*
			 * It was not too late to cancel the request.
			 *
			 * We can remove job->im, as it will never be used.
			 */
			TALLOC_FREE(job->im);
		}
	}

	/*
	 * pthreadpool_tevent_job_orphan() already removed
	 * the job from pool->jobs. And we don't need to try
	 * pthreadpool_cancel_job() again.
	 */
	job->pool = NULL;

	if (job->im != NULL) {
		/*
		 * job->im still being there means we need to wait for the
		 * immediate event to be triggered or just leak the memory.
		 *
		 * Keep it on the orphaned_jobs list, where
		 * pthreadpool_tevent_job_orphan() already put it.
		 */
		return -1;
	}

	/*
	 * Finally remove from the orphaned_jobs list
	 * and let talloc destroy us.
	 */
	DLIST_REMOVE(orphaned_jobs, job);

	return 0;
}
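
/*
 * Note on the talloc idiom above: returning -1 from a talloc
 * destructor makes the talloc_free() fail, so the job object
 * survives and stays on the orphaned_jobs list until a later
 * pthreadpool_tevent_cleanup_orphaned_jobs() run can free it.
 */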

static void pthreadpool_tevent_job_orphan(struct pthreadpool_tevent_job *job)
{
	/*
	 * We're the only function that sets
	 * job->state = NULL;
	 */
	if (job->state == NULL) {
		abort();
	}

	/*
	 * We need to reparent to a long term context.
	 * And detach from the request state.
	 * Maybe the destructor will keep the memory
	 * and leak it for now.
	 */
	(void)talloc_reparent(job->state, NULL, job);
	job->state->job = NULL;
	job->state = NULL;

	/*
	 * job->pool will only be set to NULL
	 * in the first destructor run.
	 */
	if (job->pool == NULL) {
		abort();
	}

	/*
	 * Detach it from the pool.
	 *
	 * The job might still be running,
	 * so we keep job->pool.
	 * The destructor will set it to NULL
	 * after trying pthreadpool_cancel_job().
	 */
	DLIST_REMOVE(job->pool->jobs, job);

	/*
	 * Add it to the list of orphaned jobs,
	 * which may be cleaned up later.
	 *
	 * The destructor removes it from the list
	 * when possible, or it denies the free
	 * and keeps it in the list.
	 */
	DLIST_ADD_END(orphaned_jobs, job);
	TALLOC_FREE(job);
}

static void pthreadpool_tevent_job_cleanup(struct tevent_req *req,
					   enum tevent_req_state req_state)
{
	struct pthreadpool_tevent_job_state *state =
		tevent_req_data(req,
		struct pthreadpool_tevent_job_state);

	if (state->job == NULL) {
		/*
		 * The job request is not scheduled in the pool
		 * yet or anymore.
		 */
		return;
	}

	/*
	 * We need to reparent to a long term context.
	 * Maybe the destructor will keep the memory
	 * and leak it for now.
	 */
	pthreadpool_tevent_job_orphan(state->job);
	state->job = NULL; /* not needed but looks better */
	return;
}

struct tevent_req *pthreadpool_tevent_job_send(
	TALLOC_CTX *mem_ctx, struct tevent_context *ev,
	struct pthreadpool_tevent *pool,
	void (*fn)(void *private_data), void *private_data)
{
	struct tevent_req *req = NULL;
	struct pthreadpool_tevent_job_state *state = NULL;
	struct pthreadpool_tevent_job *job = NULL;
	int ret;

	pthreadpool_tevent_cleanup_orphaned_jobs();

	req = tevent_req_create(mem_ctx, &state,
				struct pthreadpool_tevent_job_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->req = req;

	tevent_req_set_cleanup_fn(req, pthreadpool_tevent_job_cleanup);

	if (pool == NULL) {
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}
	if (pool->pool == NULL) {
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	ret = pthreadpool_tevent_register_ev(pool, ev);
	if (tevent_req_error(req, ret)) {
		return tevent_req_post(req, ev);
	}

	job = talloc_zero(state, struct pthreadpool_tevent_job);
	if (tevent_req_nomem(job, req)) {
		return tevent_req_post(req, ev);
	}
	job->pool = pool;
	job->fn = fn;
	job->private_data = private_data;
	job->im = tevent_create_immediate(state->job);
	if (tevent_req_nomem(job->im, req)) {
		return tevent_req_post(req, ev);
	}
	talloc_set_destructor(job, pthreadpool_tevent_job_destructor);
	DLIST_ADD_END(job->pool->jobs, job);
	job->state = state;
	state->job = job;

	ret = pthreadpool_add_job(job->pool->pool, 0,
				  pthreadpool_tevent_job_fn,
				  job);
	if (tevent_req_error(req, ret)) {
		return tevent_req_post(req, ev);
	}

	tevent_req_set_cancel_fn(req, pthreadpool_tevent_job_cancel);
	return req;
}

static void pthreadpool_tevent_job_fn(void *private_data)
{
	struct pthreadpool_tevent_job *job =
		talloc_get_type_abort(private_data,
		struct pthreadpool_tevent_job);

	job->fn(job->private_data);
}

static int pthreadpool_tevent_job_signal(int jobid,
					 void (*job_fn)(void *private_data),
					 void *job_private_data,
					 void *private_data)
{
	struct pthreadpool_tevent_job *job =
		talloc_get_type_abort(job_private_data,
		struct pthreadpool_tevent_job);
	struct pthreadpool_tevent_job_state *state = job->state;
	struct tevent_threaded_context *tctx = NULL;
	struct pthreadpool_tevent_glue *g = NULL;

	if (state == NULL) {
		/* Request already gone */
		return 0;
	}

#ifdef HAVE_PTHREAD
	for (g = job->pool->glue_list; g != NULL; g = g->next) {
		if (g->ev == state->ev) {
			tctx = g->tctx;
			break;
		}
	}

	if (tctx == NULL) {
		abort();
	}
#endif

	if (tctx != NULL) {
		/* with HAVE_PTHREAD */
		tevent_threaded_schedule_immediate(tctx, job->im,
						   pthreadpool_tevent_job_done,
						   job);
	} else {
		/* without HAVE_PTHREAD */
		tevent_schedule_immediate(job->im, state->ev,
					  pthreadpool_tevent_job_done,
					  job);
	}

	return 0;
}

static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
					struct tevent_immediate *im,
					void *private_data)
{
	struct pthreadpool_tevent_job *job =
		talloc_get_type_abort(private_data,
		struct pthreadpool_tevent_job);
	struct pthreadpool_tevent_job_state *state = job->state;

	TALLOC_FREE(job->im);

	if (state == NULL) {
		/* Request already gone */
		TALLOC_FREE(job);
		return;
	}

	/*
	 * pthreadpool_tevent_job_cleanup()
	 * will destroy the job.
	 */
	tevent_req_done(state->req);
}

static bool pthreadpool_tevent_job_cancel(struct tevent_req *req)
{
	struct pthreadpool_tevent_job_state *state =
		tevent_req_data(req,
		struct pthreadpool_tevent_job_state);
	struct pthreadpool_tevent_job *job = state->job;
	size_t num;

	if (job == NULL) {
		return false;
	}

	num = pthreadpool_cancel_job(job->pool->pool, 0,
				     pthreadpool_tevent_job_fn,
				     job);
	if (num == 0) {
		/*
		 * It was too late to cancel the request.
		 */
		return false;
	}

	/*
	 * It was not too late to cancel the request.
	 *
	 * We can remove job->im, as it will never be used.
	 */
	TALLOC_FREE(job->im);

	/*
	 * pthreadpool_tevent_job_cleanup()
	 * will destroy the job.
	 */
	tevent_req_defer_callback(req, state->ev);
	tevent_req_error(req, ECANCELED);
	return true;
}

int pthreadpool_tevent_job_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_unix(req);
}
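
/*
 * Usage sketch (not part of this file): a minimal, hypothetical caller
 * showing the intended send/recv pattern. The names my_job_fn() and
 * run_one_job() are invented for illustration; the sketch assumes a
 * job function that is safe to run in a worker thread and drives the
 * request synchronously via tevent_req_poll(). Error handling is
 * abbreviated.
 *
 *	static void my_job_fn(void *private_data)
 *	{
 *		int *result = (int *)private_data;
 *		*result = 42;		// runs in a pool thread
 *	}
 *
 *	static int run_one_job(TALLOC_CTX *mem_ctx, struct tevent_context *ev)
 *	{
 *		struct pthreadpool_tevent *pool = NULL;
 *		struct tevent_req *req = NULL;
 *		int result = 0;
 *		int ret;
 *
 *		ret = pthreadpool_tevent_init(mem_ctx, UINT_MAX, &pool);
 *		if (ret != 0) {
 *			return ret;
 *		}
 *
 *		req = pthreadpool_tevent_job_send(mem_ctx, ev, pool,
 *						  my_job_fn, &result);
 *		if (req == NULL) {
 *			return ENOMEM;
 *		}
 *		if (!tevent_req_poll(req, ev)) {
 *			return EIO;	// event loop failure
 *		}
 *
 *		ret = pthreadpool_tevent_job_recv(req);
 *		TALLOC_FREE(req);
 *		return ret;		// 0 on success, errno-style error otherwise
 *	}
 */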