s3:printing: Allow to run samba-bgqd as a standalone systemd service
[Samba.git] / lib / tevent / tevent_req.c
blob544f853ca9d0192c84bd479943065b8a61b552cf
1 /*
2 Unix SMB/CIFS implementation.
3 Infrastructure for async requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
9 ** under the LGPL
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
25 #include "replace.h"
26 #include "tevent.h"
27 #include "tevent_internal.h"
28 #include "tevent_util.h"
30 #undef tevent_req_set_callback
31 #undef tevent_req_set_cancel_fn
32 #undef tevent_req_set_cleanup_fn
34 char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
36 return talloc_asprintf(mem_ctx,
37 "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
38 " state[%s (%p)] timer[%p] finish[%s]",
39 req, req->internal.create_location,
40 req->internal.state,
41 (unsigned long long)req->internal.error,
42 (unsigned long long)req->internal.error,
43 req->internal.private_type,
44 req->data,
45 req->internal.timer,
46 req->internal.finish_location
50 char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req)
52 if (req == NULL) {
53 return talloc_strdup(mem_ctx, "tevent_req[NULL]");
56 if (!req->private_print) {
57 return tevent_req_default_print(req, mem_ctx);
60 return req->private_print(req, mem_ctx);
63 static int tevent_req_destructor(struct tevent_req *req);
65 struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
66 void *pdata,
67 size_t data_size,
68 const char *type,
69 const char *location)
71 return __tevent_req_create(mem_ctx,
72 pdata,
73 data_size,
74 type,
75 NULL,
76 location);
79 struct tevent_req *__tevent_req_create(TALLOC_CTX *mem_ctx,
80 void *pdata,
81 size_t data_size,
82 const char *type,
83 const char *func,
84 const char *location)
86 struct tevent_req *req;
87 struct tevent_req *parent;
88 void **ppdata = (void **)pdata;
89 void *data;
90 size_t payload;
92 payload = sizeof(struct tevent_immediate) + data_size;
93 if (payload < sizeof(struct tevent_immediate)) {
94 /* overflow */
95 return NULL;
98 req = talloc_pooled_object(
99 mem_ctx, struct tevent_req, 2,
100 sizeof(struct tevent_immediate) + data_size);
101 if (req == NULL) {
102 return NULL;
105 *req = (struct tevent_req) {
106 .internal = {
107 .private_type = type,
108 .create_location = location,
109 .state = TEVENT_REQ_IN_PROGRESS,
110 .trigger = tevent_create_immediate(req),
114 data = talloc_zero_size(req, data_size);
117 * No need to check for req->internal.trigger!=NULL or
118 * data!=NULL, this can't fail: talloc_pooled_object has
119 * already allocated sufficient memory.
122 talloc_set_name_const(data, type);
124 req->data = data;
126 talloc_set_destructor(req, tevent_req_destructor);
128 parent = talloc_get_type(talloc_parent(mem_ctx), struct tevent_req);
129 if ((parent != NULL) && (parent->internal.profile != NULL)) {
130 bool ok = tevent_req_set_profile(req);
132 if (!ok) {
133 TALLOC_FREE(req);
134 return NULL;
136 req->internal.profile->parent = parent->internal.profile;
137 DLIST_ADD_END(parent->internal.profile->subprofiles,
138 req->internal.profile);
141 *ppdata = data;
143 /* Initially, talloc_zero_size() sets internal.call_depth to 0 */
144 if (parent != NULL) {
145 req->internal.call_depth = parent->internal.call_depth + 1;
147 tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_CREATE,
148 req,
149 req->internal.call_depth,
150 func);
152 return req;
/* talloc destructor: make freeing a request behave like receiving it. */
static int tevent_req_destructor(struct tevent_req *req)
{
	tevent_req_received(req);
	return 0;
}
161 void _tevent_req_notify_callback(struct tevent_req *req, const char *location)
163 req->internal.finish_location = location;
164 if (req->internal.defer_callback_ev) {
165 (void)tevent_req_post(req, req->internal.defer_callback_ev);
166 req->internal.defer_callback_ev = NULL;
167 return;
169 if (req->async.fn != NULL) {
170 /* Calling back the parent code, decrement the call depth. */
171 size_t new_depth = req->internal.call_depth > 0 ?
172 req->internal.call_depth - 1 : 0;
173 tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_NOTIFY_CB,
174 req,
175 new_depth,
176 req->async.fn_name);
177 req->async.fn(req);
181 static void tevent_req_cleanup(struct tevent_req *req)
183 if (req->private_cleanup.state >= req->internal.state) {
185 * Don't call the cleanup_function multiple times for the same
186 * state recursively
188 return;
191 tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_CLEANUP,
192 req,
193 req->internal.call_depth,
194 req->private_cleanup.fn_name);
196 if (req->private_cleanup.fn == NULL) {
197 return;
200 req->private_cleanup.state = req->internal.state;
201 req->private_cleanup.fn(req, req->internal.state);
204 static void tevent_req_finish(struct tevent_req *req,
205 enum tevent_req_state state,
206 const char *location)
208 struct tevent_req_profile *p;
210 * make sure we do not timeout after
211 * the request was already finished
213 TALLOC_FREE(req->internal.timer);
215 req->internal.state = state;
216 req->internal.finish_location = location;
218 tevent_req_cleanup(req);
220 p = req->internal.profile;
222 if (p != NULL) {
223 p->stop_location = location;
224 p->stop_time = tevent_timeval_current();
225 p->state = state;
226 p->user_error = req->internal.error;
228 if (p->parent != NULL) {
229 talloc_steal(p->parent, p);
230 req->internal.profile = NULL;
234 _tevent_req_notify_callback(req, location);
237 void _tevent_req_done(struct tevent_req *req,
238 const char *location)
240 tevent_req_finish(req, TEVENT_REQ_DONE, location);
243 bool _tevent_req_error(struct tevent_req *req,
244 uint64_t error,
245 const char *location)
247 if (error == 0) {
248 return false;
251 req->internal.error = error;
252 tevent_req_finish(req, TEVENT_REQ_USER_ERROR, location);
253 return true;
256 void _tevent_req_oom(struct tevent_req *req, const char *location)
258 tevent_req_finish(req, TEVENT_REQ_NO_MEMORY, location);
261 bool _tevent_req_nomem(const void *p,
262 struct tevent_req *req,
263 const char *location)
265 if (p != NULL) {
266 return false;
268 _tevent_req_oom(req, location);
269 return true;
273 * @internal
275 * @brief Immediate event callback.
277 * @param[in] ev The event context to use.
279 * @param[in] im The immediate event.
281 * @param[in] priv The async request to be finished.
283 static void tevent_req_trigger(struct tevent_context *ev,
284 struct tevent_immediate *im,
285 void *private_data)
287 struct tevent_req *req =
288 talloc_get_type_abort(private_data,
289 struct tevent_req);
291 tevent_req_finish(req, req->internal.state,
292 req->internal.finish_location);
295 struct tevent_req *tevent_req_post(struct tevent_req *req,
296 struct tevent_context *ev)
298 tevent_schedule_immediate(req->internal.trigger,
299 ev, tevent_req_trigger, req);
300 return req;
303 void tevent_req_defer_callback(struct tevent_req *req,
304 struct tevent_context *ev)
306 req->internal.defer_callback_ev = ev;
309 bool tevent_req_is_in_progress(struct tevent_req *req)
311 if (req->internal.state == TEVENT_REQ_IN_PROGRESS) {
312 return true;
315 return false;
318 void tevent_req_received(struct tevent_req *req)
320 talloc_set_destructor(req, NULL);
322 req->private_print = NULL;
323 req->private_cancel.fn = NULL;
324 req->private_cancel.fn_name = NULL;
326 TALLOC_FREE(req->internal.trigger);
327 TALLOC_FREE(req->internal.timer);
329 req->internal.state = TEVENT_REQ_RECEIVED;
331 tevent_req_cleanup(req);
333 TALLOC_FREE(req->data);
336 bool tevent_req_poll(struct tevent_req *req,
337 struct tevent_context *ev)
339 while (tevent_req_is_in_progress(req)) {
340 int ret;
342 ret = tevent_loop_once(ev);
343 if (ret != 0) {
344 return false;
348 return true;
351 bool tevent_req_is_error(struct tevent_req *req, enum tevent_req_state *state,
352 uint64_t *error)
354 if (req->internal.state == TEVENT_REQ_DONE) {
355 return false;
357 if (req->internal.state == TEVENT_REQ_USER_ERROR) {
358 *error = req->internal.error;
360 *state = req->internal.state;
361 return true;
364 static void tevent_req_timedout(struct tevent_context *ev,
365 struct tevent_timer *te,
366 struct timeval now,
367 void *private_data)
369 struct tevent_req *req =
370 talloc_get_type_abort(private_data,
371 struct tevent_req);
373 TALLOC_FREE(req->internal.timer);
375 tevent_req_finish(req, TEVENT_REQ_TIMED_OUT, __FUNCTION__);
378 bool tevent_req_set_endtime(struct tevent_req *req,
379 struct tevent_context *ev,
380 struct timeval endtime)
382 TALLOC_FREE(req->internal.timer);
384 req->internal.timer = tevent_add_timer(ev, req, endtime,
385 tevent_req_timedout,
386 req);
387 if (tevent_req_nomem(req->internal.timer, req)) {
388 return false;
391 return true;
394 void tevent_req_reset_endtime(struct tevent_req *req)
396 TALLOC_FREE(req->internal.timer);
399 void tevent_req_set_callback(struct tevent_req *req, tevent_req_fn fn, void *pvt)
401 return _tevent_req_set_callback(req, fn, NULL, pvt);
404 void _tevent_req_set_callback(struct tevent_req *req,
405 tevent_req_fn fn,
406 const char *fn_name,
407 void *pvt)
409 req->async.fn = fn;
410 req->async.fn_name = fn_name;
411 req->async.private_data = pvt;
414 void *_tevent_req_callback_data(struct tevent_req *req)
416 return req->async.private_data;
419 void *_tevent_req_data(struct tevent_req *req)
421 return req->data;
424 void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn)
426 req->private_print = fn;
429 void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn)
431 _tevent_req_set_cancel_fn(req, fn, NULL);
434 void _tevent_req_set_cancel_fn(struct tevent_req *req,
435 tevent_req_cancel_fn fn,
436 const char *fn_name)
438 req->private_cancel.fn = fn;
439 req->private_cancel.fn_name = fn != NULL ? fn_name : NULL;
442 bool _tevent_req_cancel(struct tevent_req *req, const char *location)
444 tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_CANCEL,
445 req,
446 req->internal.call_depth,
447 req->private_cancel.fn_name);
449 if (req->private_cancel.fn == NULL) {
450 return false;
453 return req->private_cancel.fn(req);
456 void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn)
458 _tevent_req_set_cleanup_fn(req, fn, NULL);
461 void _tevent_req_set_cleanup_fn(struct tevent_req *req,
462 tevent_req_cleanup_fn fn,
463 const char *fn_name)
465 req->private_cleanup.state = req->internal.state;
466 req->private_cleanup.fn = fn;
467 req->private_cleanup.fn_name = fn != NULL ? fn_name : NULL;
470 static int tevent_req_profile_destructor(struct tevent_req_profile *p);
472 bool tevent_req_set_profile(struct tevent_req *req)
474 struct tevent_req_profile *p;
476 if (req->internal.profile != NULL) {
477 tevent_req_error(req, EINVAL);
478 return false;
481 p = tevent_req_profile_create(req);
483 if (tevent_req_nomem(p, req)) {
484 return false;
487 p->req_name = talloc_get_name(req->data);
488 p->start_location = req->internal.create_location;
489 p->start_time = tevent_timeval_current();
491 req->internal.profile = p;
493 return true;
496 static int tevent_req_profile_destructor(struct tevent_req_profile *p)
498 if (p->parent != NULL) {
499 DLIST_REMOVE(p->parent->subprofiles, p);
500 p->parent = NULL;
503 while (p->subprofiles != NULL) {
504 p->subprofiles->parent = NULL;
505 DLIST_REMOVE(p->subprofiles, p->subprofiles);
508 return 0;
511 struct tevent_req_profile *tevent_req_move_profile(struct tevent_req *req,
512 TALLOC_CTX *mem_ctx)
514 return talloc_move(mem_ctx, &req->internal.profile);
517 const struct tevent_req_profile *tevent_req_get_profile(
518 struct tevent_req *req)
520 return req->internal.profile;
523 void tevent_req_profile_get_name(const struct tevent_req_profile *profile,
524 const char **req_name)
526 if (req_name != NULL) {
527 *req_name = profile->req_name;
531 void tevent_req_profile_get_start(const struct tevent_req_profile *profile,
532 const char **start_location,
533 struct timeval *start_time)
535 if (start_location != NULL) {
536 *start_location = profile->start_location;
538 if (start_time != NULL) {
539 *start_time = profile->start_time;
543 void tevent_req_profile_get_stop(const struct tevent_req_profile *profile,
544 const char **stop_location,
545 struct timeval *stop_time)
547 if (stop_location != NULL) {
548 *stop_location = profile->stop_location;
550 if (stop_time != NULL) {
551 *stop_time = profile->stop_time;
555 void tevent_req_profile_get_status(const struct tevent_req_profile *profile,
556 pid_t *pid,
557 enum tevent_req_state *state,
558 uint64_t *user_error)
560 if (pid != NULL) {
561 *pid = profile->pid;
563 if (state != NULL) {
564 *state = profile->state;
566 if (user_error != NULL) {
567 *user_error = profile->user_error;
571 const struct tevent_req_profile *tevent_req_profile_get_subprofiles(
572 const struct tevent_req_profile *profile)
574 return profile->subprofiles;
577 const struct tevent_req_profile *tevent_req_profile_next(
578 const struct tevent_req_profile *profile)
580 return profile->next;
583 struct tevent_req_profile *tevent_req_profile_create(TALLOC_CTX *mem_ctx)
585 struct tevent_req_profile *result;
587 result = talloc_zero(mem_ctx, struct tevent_req_profile);
588 if (result == NULL) {
589 return NULL;
591 talloc_set_destructor(result, tevent_req_profile_destructor);
593 return result;
596 bool tevent_req_profile_set_name(struct tevent_req_profile *profile,
597 const char *req_name)
599 profile->req_name = talloc_strdup(profile, req_name);
600 return (profile->req_name != NULL);
603 bool tevent_req_profile_set_start(struct tevent_req_profile *profile,
604 const char *start_location,
605 struct timeval start_time)
607 profile->start_time = start_time;
609 profile->start_location = talloc_strdup(profile, start_location);
610 return (profile->start_location != NULL);
613 bool tevent_req_profile_set_stop(struct tevent_req_profile *profile,
614 const char *stop_location,
615 struct timeval stop_time)
617 profile->stop_time = stop_time;
619 profile->stop_location = talloc_strdup(profile, stop_location);
620 return (profile->stop_location != NULL);
623 void tevent_req_profile_set_status(struct tevent_req_profile *profile,
624 pid_t pid,
625 enum tevent_req_state state,
626 uint64_t user_error)
628 profile->pid = pid;
629 profile->state = state;
630 profile->user_error = user_error;
633 void tevent_req_profile_append_sub(struct tevent_req_profile *parent_profile,
634 struct tevent_req_profile **sub_profile)
636 struct tevent_req_profile *sub;
638 sub = talloc_move(parent_profile, sub_profile);
640 sub->parent = parent_profile;
641 DLIST_ADD_END(parent_profile->subprofiles, sub);