/*
 * Unix SMB/CIFS implementation.
 * threadpool implementation based on pthreads
 * Copyright (C) Volker Lendecke 2009,2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "replace.h"
#include "system/filesys.h"
#include "pthreadpool_tevent.h"
#include "pthreadpool.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/dlinklist.h"
struct pthreadpool_tevent_job_state;

/*
 * We need one pthreadpool_tevent_glue object per unique combination of tevent
 * contexts and pthreadpool_tevent objects. Maintain a list of used tevent
 * contexts in a pthreadpool_tevent.
 */
struct pthreadpool_tevent_glue {
	struct pthreadpool_tevent_glue *prev, *next;
	struct pthreadpool_tevent *pool; /* back-pointer to owning object. */
	/* Tuple we are keeping track of in this list. */
	struct tevent_context *ev;
	struct tevent_threaded_context *tctx;
	/* Pointer to link object owned by *ev. */
	struct pthreadpool_tevent_glue_ev_link *ev_link;
};
/*
 * The pthreadpool_tevent_glue_ev_link and its destructor ensure we remove the
 * tevent context from our list of active event contexts if the event context
 * is destroyed.
 * This structure is talloc()'ed from the struct tevent_context *, and is a
 * back-pointer allowing the related struct pthreadpool_tevent_glue object
 * to be removed from the struct pthreadpool_tevent glue list if the owning
 * tevent_context is talloc_free()'ed.
 */
struct pthreadpool_tevent_glue_ev_link {
	struct pthreadpool_tevent_glue *glue;
};
struct pthreadpool_tevent {
	struct pthreadpool *pool;
	struct pthreadpool_tevent_glue *glue_list;

	struct pthreadpool_tevent_job_state *jobs;
};
struct pthreadpool_tevent_job_state {
	struct pthreadpool_tevent_job_state *prev, *next;
	struct pthreadpool_tevent *pool;
	struct tevent_context *ev;
	struct tevent_immediate *im;
	struct tevent_req *req;

	void (*fn)(void *private_data);
	void *private_data;
};
static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool);

static int pthreadpool_tevent_job_signal(int jobid,
					 void (*job_fn)(void *private_data),
					 void *job_private_data,
					 void *private_data);
int pthreadpool_tevent_init(TALLOC_CTX *mem_ctx, unsigned max_threads,
			    struct pthreadpool_tevent **presult)
{
	struct pthreadpool_tevent *pool;
	int ret;

	pool = talloc_zero(mem_ctx, struct pthreadpool_tevent);
	if (pool == NULL) {
		return ENOMEM;
	}

	ret = pthreadpool_init(max_threads, &pool->pool,
			       pthreadpool_tevent_job_signal, pool);
	if (ret != 0) {
		TALLOC_FREE(pool);
		return ret;
	}

	talloc_set_destructor(pool, pthreadpool_tevent_destructor);

	*presult = pool;
	return 0;
}
size_t pthreadpool_tevent_max_threads(struct pthreadpool_tevent *pool)
{
	if (pool->pool == NULL) {
		return 0;
	}

	return pthreadpool_max_threads(pool->pool);
}
size_t pthreadpool_tevent_queued_jobs(struct pthreadpool_tevent *pool)
{
	if (pool->pool == NULL) {
		return 0;
	}

	return pthreadpool_queued_jobs(pool->pool);
}
static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool)
{
	struct pthreadpool_tevent_job_state *state, *next;
	struct pthreadpool_tevent_glue *glue = NULL;
	int ret;

	ret = pthreadpool_stop(pool->pool);
	if (ret != 0) {
		return ret;
	}

	for (state = pool->jobs; state != NULL; state = next) {
		next = state->next;
		DLIST_REMOVE(pool->jobs, state);
		state->pool = NULL;
	}

	/*
	 * Delete all the registered
	 * tevent_context/tevent_threaded_context
	 * pairs.
	 */
	for (glue = pool->glue_list; glue != NULL; glue = pool->glue_list) {
		/* The glue destructor removes it from the list */
		TALLOC_FREE(glue);
	}
	pool->glue_list = NULL;

	ret = pthreadpool_destroy(pool->pool);
	if (ret != 0) {
		return ret;
	}
	pool->pool = NULL;

	return 0;
}
static int pthreadpool_tevent_glue_destructor(
	struct pthreadpool_tevent_glue *glue)
{
	if (glue->pool->glue_list != NULL) {
		DLIST_REMOVE(glue->pool->glue_list, glue);
	}

	/* Ensure the ev_link destructor knows we're gone */
	glue->ev_link->glue = NULL;

	TALLOC_FREE(glue->ev_link);
	TALLOC_FREE(glue->tctx);

	return 0;
}
/*
 * Destructor called either explicitly from
 * pthreadpool_tevent_glue_destructor(), or indirectly
 * when the owning tevent_context is destroyed.
 *
 * When called from pthreadpool_tevent_glue_destructor()
 * ev_link->glue is already NULL, so this does nothing.
 *
 * When called from talloc_free() of the owning
 * tevent_context we must ensure we also remove the
 * linked glue object from the list inside
 * struct pthreadpool_tevent.
 */
static int pthreadpool_tevent_glue_link_destructor(
	struct pthreadpool_tevent_glue_ev_link *ev_link)
{
	TALLOC_FREE(ev_link->glue);
	return 0;
}
static int pthreadpool_tevent_register_ev(struct pthreadpool_tevent *pool,
					  struct tevent_context *ev)
{
	struct pthreadpool_tevent_glue *glue = NULL;
	struct pthreadpool_tevent_glue_ev_link *ev_link = NULL;

	/*
	 * See if this tevent_context was already registered by
	 * searching the glue object list. If so we have nothing
	 * to do here - we already have a tevent_context/tevent_threaded_context
	 * pair.
	 */
	for (glue = pool->glue_list; glue != NULL; glue = glue->next) {
		if (glue->ev == ev) {
			return 0;
		}
	}

	/*
	 * Event context not yet registered - create a new glue
	 * object containing a tevent_context/tevent_threaded_context
	 * pair and put it on the list to remember this registration.
	 * We also need a link object to ensure the event context
	 * can't go away without us knowing about it.
	 */
	glue = talloc_zero(pool, struct pthreadpool_tevent_glue);
	if (glue == NULL) {
		return ENOMEM;
	}
	*glue = (struct pthreadpool_tevent_glue) {
		.pool = pool,
		.ev = ev,
	};
	talloc_set_destructor(glue, pthreadpool_tevent_glue_destructor);

	/*
	 * Now allocate the link object to the event context. Note this
	 * is allocated OFF THE EVENT CONTEXT ITSELF, so if the event
	 * context is freed we are able to cleanup the glue object
	 * in the link object destructor.
	 */
	ev_link = talloc_zero(ev, struct pthreadpool_tevent_glue_ev_link);
	if (ev_link == NULL) {
		TALLOC_FREE(glue);
		return ENOMEM;
	}
	ev_link->glue = glue;
	talloc_set_destructor(ev_link, pthreadpool_tevent_glue_link_destructor);

	glue->ev_link = ev_link;

#ifdef HAVE_PTHREAD
	glue->tctx = tevent_threaded_context_create(glue, ev);
	if (glue->tctx == NULL) {
		TALLOC_FREE(ev_link);
		TALLOC_FREE(glue);
		return ENOMEM;
	}
#endif

	DLIST_ADD(pool->glue_list, glue);
	return 0;
}
static void pthreadpool_tevent_job_fn(void *private_data);
static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
					struct tevent_immediate *im,
					void *private_data);
static int pthreadpool_tevent_job_state_destructor(
	struct pthreadpool_tevent_job_state *state)
{
	if (state->pool == NULL) {
		return 0;
	}

	/*
	 * We should never be called with state->req == NULL,
	 * state->pool must be cleared before the 2nd talloc_free().
	 */
	if (state->req == NULL) {
		abort();
	}

	/*
	 * We need to reparent to a long term context.
	 */
	(void)talloc_reparent(state->req, NULL, state);
	state->req = NULL;
	return -1;
}
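
/*
 * Lifecycle sketch (illustrative, not part of the original file):
 * if a caller talloc_free()'s the request while the job is still
 * queued or running, e.g.
 *
 *	req = pthreadpool_tevent_job_send(mem_ctx, ev, pool, fn, data);
 *	TALLOC_FREE(req);
 *
 * the destructor above fires, reparents "state" from the dying
 * request onto the NULL talloc context and returns -1 so talloc
 * keeps "state" alive for the worker thread. When the job later
 * signals completion, pthreadpool_tevent_job_done() sees
 * state->req == NULL and frees the orphaned state.
 */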
struct tevent_req *pthreadpool_tevent_job_send(
	TALLOC_CTX *mem_ctx, struct tevent_context *ev,
	struct pthreadpool_tevent *pool,
	void (*fn)(void *private_data), void *private_data)
{
	struct tevent_req *req;
	struct pthreadpool_tevent_job_state *state;
	int ret;

	req = tevent_req_create(mem_ctx, &state,
				struct pthreadpool_tevent_job_state);
	if (req == NULL) {
		return NULL;
	}
	state->pool = pool;
	state->ev = ev;
	state->req = req;
	state->fn = fn;
	state->private_data = private_data;

	if (pool == NULL) {
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}
	if (pool->pool == NULL) {
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	state->im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->im, req)) {
		return tevent_req_post(req, ev);
	}

	ret = pthreadpool_tevent_register_ev(pool, ev);
	if (tevent_req_error(req, ret)) {
		return tevent_req_post(req, ev);
	}

	ret = pthreadpool_add_job(pool->pool, 0,
				  pthreadpool_tevent_job_fn,
				  state);
	if (tevent_req_error(req, ret)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * Once the job is scheduled, we need to protect
	 * our memory.
	 */
	talloc_set_destructor(state, pthreadpool_tevent_job_state_destructor);

	DLIST_ADD_END(pool->jobs, state);

	return req;
}
static void pthreadpool_tevent_job_fn(void *private_data)
{
	struct pthreadpool_tevent_job_state *state = talloc_get_type_abort(
		private_data, struct pthreadpool_tevent_job_state);
	state->fn(state->private_data);
}
static int pthreadpool_tevent_job_signal(int jobid,
					 void (*job_fn)(void *private_data),
					 void *job_private_data,
					 void *private_data)
{
	struct pthreadpool_tevent_job_state *state = talloc_get_type_abort(
		job_private_data, struct pthreadpool_tevent_job_state);
	struct tevent_threaded_context *tctx = NULL;
	struct pthreadpool_tevent_glue *g = NULL;

	if (state->pool == NULL) {
		/* The pthreadpool_tevent is already gone */
		return 0;
	}

#ifdef HAVE_PTHREAD
	for (g = state->pool->glue_list; g != NULL; g = g->next) {
		if (g->ev == state->ev) {
			tctx = g->tctx;
			break;
		}
	}

	if (tctx == NULL) {
		abort();
	}
#endif

	if (tctx != NULL) {
		/* with HAVE_PTHREAD */
		tevent_threaded_schedule_immediate(tctx, state->im,
						   pthreadpool_tevent_job_done,
						   state);
	} else {
		/* without HAVE_PTHREAD */
		tevent_schedule_immediate(state->im, state->ev,
					  pthreadpool_tevent_job_done,
					  state);
	}

	return 0;
}
static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
					struct tevent_immediate *im,
					void *private_data)
{
	struct pthreadpool_tevent_job_state *state = talloc_get_type_abort(
		private_data, struct pthreadpool_tevent_job_state);

	if (state->pool != NULL) {
		DLIST_REMOVE(state->pool->jobs, state);
		state->pool = NULL;
	}

	if (state->req == NULL) {
		/*
		 * There was a talloc_free() of state->req
		 * while the job was pending, which means
		 * we were reparented onto a long-term
		 * talloc context.
		 *
		 * We just cleanup here...
		 */
		talloc_free(state);
		return;
	}

	tevent_req_done(state->req);
}
int pthreadpool_tevent_job_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_unix(req);
}
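
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * drives a blocking function through the pool with the standard
 * tevent_req send/recv pattern. All "demo_*" names are hypothetical,
 * and the demo is compiled out by default.
 */
#ifdef PTHREADPOOL_TEVENT_USAGE_DEMO
static void demo_job(void *private_data)
{
	int *result = (int *)private_data;
	*result = 42;		/* runs on a pool worker thread */
}

static void demo_done(struct tevent_req *req)
{
	bool *done = (bool *)tevent_req_callback_data_void(req);
	int ret;

	ret = pthreadpool_tevent_job_recv(req);
	TALLOC_FREE(req);
	if (ret != 0) {
		abort();	/* demo only: treat any error as fatal */
	}
	*done = true;
}

static int demo_main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct pthreadpool_tevent *pool = NULL;
	struct tevent_req *req = NULL;
	int result = 0;
	bool done = false;
	int ret;

	ret = pthreadpool_tevent_init(mem_ctx, 4, &pool);
	if (ret != 0) {
		TALLOC_FREE(mem_ctx);
		return ret;
	}

	req = pthreadpool_tevent_job_send(mem_ctx, ev, pool,
					  demo_job, &result);
	if (req == NULL) {
		TALLOC_FREE(mem_ctx);
		return ENOMEM;
	}
	tevent_req_set_callback(req, demo_done, &done);

	/* pthreadpool_tevent_job_done() fires from this loop */
	while (!done) {
		tevent_loop_once(ev);
	}

	TALLOC_FREE(mem_ctx);
	return result;	/* 42 */
}
#endif /* PTHREADPOOL_TEVENT_USAGE_DEMO */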