tevent: Flow: store callback function name in tevent_req
[Samba.git] / lib / tevent / tevent_req.c
blob a677f437cdd8932e342870739b5d722b82bda559
1 /*
2 Unix SMB/CIFS implementation.
3 Infrastructure for async requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
9 ** under the LGPL
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
25 #include "replace.h"
26 #include "tevent.h"
27 #include "tevent_internal.h"
28 #include "tevent_util.h"
30 #undef tevent_req_set_callback
32 char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
34 return talloc_asprintf(mem_ctx,
35 "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
36 " state[%s (%p)] timer[%p] finish[%s]",
37 req, req->internal.create_location,
38 req->internal.state,
39 (unsigned long long)req->internal.error,
40 (unsigned long long)req->internal.error,
41 req->internal.private_type,
42 req->data,
43 req->internal.timer,
44 req->internal.finish_location
48 char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req)
50 if (req == NULL) {
51 return talloc_strdup(mem_ctx, "tevent_req[NULL]");
54 if (!req->private_print) {
55 return tevent_req_default_print(req, mem_ctx);
58 return req->private_print(req, mem_ctx);
61 static int tevent_req_destructor(struct tevent_req *req);
63 struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
64 void *pdata,
65 size_t data_size,
66 const char *type,
67 const char *location)
69 return __tevent_req_create(mem_ctx,
70 pdata,
71 data_size,
72 type,
73 NULL,
74 location);
77 struct tevent_req *__tevent_req_create(TALLOC_CTX *mem_ctx,
78 void *pdata,
79 size_t data_size,
80 const char *type,
81 const char *func,
82 const char *location)
84 struct tevent_req *req;
85 struct tevent_req *parent;
86 void **ppdata = (void **)pdata;
87 void *data;
88 size_t payload;
90 payload = sizeof(struct tevent_immediate) + data_size;
91 if (payload < sizeof(struct tevent_immediate)) {
92 /* overflow */
93 return NULL;
96 req = talloc_pooled_object(
97 mem_ctx, struct tevent_req, 2,
98 sizeof(struct tevent_immediate) + data_size);
99 if (req == NULL) {
100 return NULL;
103 *req = (struct tevent_req) {
104 .internal = {
105 .private_type = type,
106 .create_location = location,
107 .state = TEVENT_REQ_IN_PROGRESS,
108 .trigger = tevent_create_immediate(req),
112 data = talloc_zero_size(req, data_size);
115 * No need to check for req->internal.trigger!=NULL or
116 * data!=NULL, this can't fail: talloc_pooled_object has
117 * already allocated sufficient memory.
120 talloc_set_name_const(data, type);
122 req->data = data;
124 talloc_set_destructor(req, tevent_req_destructor);
126 parent = talloc_get_type(talloc_parent(mem_ctx), struct tevent_req);
127 if ((parent != NULL) && (parent->internal.profile != NULL)) {
128 bool ok = tevent_req_set_profile(req);
130 if (!ok) {
131 TALLOC_FREE(req);
132 return NULL;
134 req->internal.profile->parent = parent->internal.profile;
135 DLIST_ADD_END(parent->internal.profile->subprofiles,
136 req->internal.profile);
139 *ppdata = data;
141 /* Initially, talloc_zero_size() sets internal.call_depth to 0 */
142 if (parent != NULL && parent->internal.call_depth > 0) {
143 req->internal.call_depth = parent->internal.call_depth + 1;
144 tevent_thread_call_depth_set(req->internal.call_depth);
147 return req;
/* Freeing a request is treated exactly like receiving its result. */
static int tevent_req_destructor(struct tevent_req *req)
{
	tevent_req_received(req);
	return 0;
}
156 void _tevent_req_notify_callback(struct tevent_req *req, const char *location)
158 req->internal.finish_location = location;
159 if (req->internal.defer_callback_ev) {
160 (void)tevent_req_post(req, req->internal.defer_callback_ev);
161 req->internal.defer_callback_ev = NULL;
162 return;
164 if (req->async.fn != NULL) {
165 /* Calling back the parent code, decrement the call depth. */
166 tevent_thread_call_depth_set(req->internal.call_depth > 0 ?
167 req->internal.call_depth - 1 : 0);
168 req->async.fn(req);
172 static void tevent_req_cleanup(struct tevent_req *req)
174 if (req->private_cleanup.fn == NULL) {
175 return;
178 if (req->private_cleanup.state >= req->internal.state) {
180 * Don't call the cleanup_function multiple times for the same
181 * state recursively
183 return;
186 req->private_cleanup.state = req->internal.state;
187 req->private_cleanup.fn(req, req->internal.state);
190 static void tevent_req_finish(struct tevent_req *req,
191 enum tevent_req_state state,
192 const char *location)
194 struct tevent_req_profile *p;
196 * make sure we do not timeout after
197 * the request was already finished
199 TALLOC_FREE(req->internal.timer);
201 req->internal.state = state;
202 req->internal.finish_location = location;
204 tevent_req_cleanup(req);
206 p = req->internal.profile;
208 if (p != NULL) {
209 p->stop_location = location;
210 p->stop_time = tevent_timeval_current();
211 p->state = state;
212 p->user_error = req->internal.error;
214 if (p->parent != NULL) {
215 talloc_steal(p->parent, p);
216 req->internal.profile = NULL;
220 _tevent_req_notify_callback(req, location);
223 void _tevent_req_done(struct tevent_req *req,
224 const char *location)
226 tevent_req_finish(req, TEVENT_REQ_DONE, location);
229 bool _tevent_req_error(struct tevent_req *req,
230 uint64_t error,
231 const char *location)
233 if (error == 0) {
234 return false;
237 req->internal.error = error;
238 tevent_req_finish(req, TEVENT_REQ_USER_ERROR, location);
239 return true;
242 void _tevent_req_oom(struct tevent_req *req, const char *location)
244 tevent_req_finish(req, TEVENT_REQ_NO_MEMORY, location);
247 bool _tevent_req_nomem(const void *p,
248 struct tevent_req *req,
249 const char *location)
251 if (p != NULL) {
252 return false;
254 _tevent_req_oom(req, location);
255 return true;
259 * @internal
261 * @brief Immediate event callback.
263 * @param[in] ev The event context to use.
265 * @param[in] im The immediate event.
267 * @param[in] priv The async request to be finished.
269 static void tevent_req_trigger(struct tevent_context *ev,
270 struct tevent_immediate *im,
271 void *private_data)
273 struct tevent_req *req =
274 talloc_get_type_abort(private_data,
275 struct tevent_req);
277 tevent_req_finish(req, req->internal.state,
278 req->internal.finish_location);
281 struct tevent_req *tevent_req_post(struct tevent_req *req,
282 struct tevent_context *ev)
284 tevent_schedule_immediate(req->internal.trigger,
285 ev, tevent_req_trigger, req);
286 return req;
289 void tevent_req_defer_callback(struct tevent_req *req,
290 struct tevent_context *ev)
292 req->internal.defer_callback_ev = ev;
295 bool tevent_req_is_in_progress(struct tevent_req *req)
297 if (req->internal.state == TEVENT_REQ_IN_PROGRESS) {
298 return true;
301 return false;
304 void tevent_req_received(struct tevent_req *req)
306 talloc_set_destructor(req, NULL);
308 req->private_print = NULL;
309 req->private_cancel = NULL;
311 TALLOC_FREE(req->internal.trigger);
312 TALLOC_FREE(req->internal.timer);
314 req->internal.state = TEVENT_REQ_RECEIVED;
316 tevent_req_cleanup(req);
318 TALLOC_FREE(req->data);
321 bool tevent_req_poll(struct tevent_req *req,
322 struct tevent_context *ev)
324 while (tevent_req_is_in_progress(req)) {
325 int ret;
327 ret = tevent_loop_once(ev);
328 if (ret != 0) {
329 return false;
333 return true;
336 bool tevent_req_is_error(struct tevent_req *req, enum tevent_req_state *state,
337 uint64_t *error)
339 if (req->internal.state == TEVENT_REQ_DONE) {
340 return false;
342 if (req->internal.state == TEVENT_REQ_USER_ERROR) {
343 *error = req->internal.error;
345 *state = req->internal.state;
346 return true;
349 static void tevent_req_timedout(struct tevent_context *ev,
350 struct tevent_timer *te,
351 struct timeval now,
352 void *private_data)
354 struct tevent_req *req =
355 talloc_get_type_abort(private_data,
356 struct tevent_req);
358 TALLOC_FREE(req->internal.timer);
360 tevent_req_finish(req, TEVENT_REQ_TIMED_OUT, __FUNCTION__);
363 bool tevent_req_set_endtime(struct tevent_req *req,
364 struct tevent_context *ev,
365 struct timeval endtime)
367 TALLOC_FREE(req->internal.timer);
369 req->internal.timer = tevent_add_timer(ev, req, endtime,
370 tevent_req_timedout,
371 req);
372 if (tevent_req_nomem(req->internal.timer, req)) {
373 return false;
376 return true;
379 void tevent_req_reset_endtime(struct tevent_req *req)
381 TALLOC_FREE(req->internal.timer);
384 void tevent_req_set_callback(struct tevent_req *req, tevent_req_fn fn, void *pvt)
386 return _tevent_req_set_callback(req, fn, NULL, pvt);
389 void _tevent_req_set_callback(struct tevent_req *req,
390 tevent_req_fn fn,
391 const char *fn_name,
392 void *pvt)
394 req->async.fn = fn;
395 req->async.fn_name = fn_name;
396 req->async.private_data = pvt;
399 void *_tevent_req_callback_data(struct tevent_req *req)
401 return req->async.private_data;
404 void *_tevent_req_data(struct tevent_req *req)
406 return req->data;
409 void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn)
411 req->private_print = fn;
414 void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn)
416 req->private_cancel = fn;
419 bool _tevent_req_cancel(struct tevent_req *req, const char *location)
421 if (req->private_cancel == NULL) {
422 return false;
425 return req->private_cancel(req);
428 void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn)
430 req->private_cleanup.state = req->internal.state;
431 req->private_cleanup.fn = fn;
434 static int tevent_req_profile_destructor(struct tevent_req_profile *p);
436 bool tevent_req_set_profile(struct tevent_req *req)
438 struct tevent_req_profile *p;
440 if (req->internal.profile != NULL) {
441 tevent_req_error(req, EINVAL);
442 return false;
445 p = tevent_req_profile_create(req);
447 if (tevent_req_nomem(p, req)) {
448 return false;
451 p->req_name = talloc_get_name(req->data);
452 p->start_location = req->internal.create_location;
453 p->start_time = tevent_timeval_current();
455 req->internal.profile = p;
457 return true;
460 static int tevent_req_profile_destructor(struct tevent_req_profile *p)
462 if (p->parent != NULL) {
463 DLIST_REMOVE(p->parent->subprofiles, p);
464 p->parent = NULL;
467 while (p->subprofiles != NULL) {
468 p->subprofiles->parent = NULL;
469 DLIST_REMOVE(p->subprofiles, p->subprofiles);
472 return 0;
475 struct tevent_req_profile *tevent_req_move_profile(struct tevent_req *req,
476 TALLOC_CTX *mem_ctx)
478 return talloc_move(mem_ctx, &req->internal.profile);
481 const struct tevent_req_profile *tevent_req_get_profile(
482 struct tevent_req *req)
484 return req->internal.profile;
487 void tevent_req_profile_get_name(const struct tevent_req_profile *profile,
488 const char **req_name)
490 if (req_name != NULL) {
491 *req_name = profile->req_name;
495 void tevent_req_profile_get_start(const struct tevent_req_profile *profile,
496 const char **start_location,
497 struct timeval *start_time)
499 if (start_location != NULL) {
500 *start_location = profile->start_location;
502 if (start_time != NULL) {
503 *start_time = profile->start_time;
507 void tevent_req_profile_get_stop(const struct tevent_req_profile *profile,
508 const char **stop_location,
509 struct timeval *stop_time)
511 if (stop_location != NULL) {
512 *stop_location = profile->stop_location;
514 if (stop_time != NULL) {
515 *stop_time = profile->stop_time;
519 void tevent_req_profile_get_status(const struct tevent_req_profile *profile,
520 pid_t *pid,
521 enum tevent_req_state *state,
522 uint64_t *user_error)
524 if (pid != NULL) {
525 *pid = profile->pid;
527 if (state != NULL) {
528 *state = profile->state;
530 if (user_error != NULL) {
531 *user_error = profile->user_error;
535 const struct tevent_req_profile *tevent_req_profile_get_subprofiles(
536 const struct tevent_req_profile *profile)
538 return profile->subprofiles;
541 const struct tevent_req_profile *tevent_req_profile_next(
542 const struct tevent_req_profile *profile)
544 return profile->next;
547 struct tevent_req_profile *tevent_req_profile_create(TALLOC_CTX *mem_ctx)
549 struct tevent_req_profile *result;
551 result = talloc_zero(mem_ctx, struct tevent_req_profile);
552 if (result == NULL) {
553 return NULL;
555 talloc_set_destructor(result, tevent_req_profile_destructor);
557 return result;
560 bool tevent_req_profile_set_name(struct tevent_req_profile *profile,
561 const char *req_name)
563 profile->req_name = talloc_strdup(profile, req_name);
564 return (profile->req_name != NULL);
567 bool tevent_req_profile_set_start(struct tevent_req_profile *profile,
568 const char *start_location,
569 struct timeval start_time)
571 profile->start_time = start_time;
573 profile->start_location = talloc_strdup(profile, start_location);
574 return (profile->start_location != NULL);
577 bool tevent_req_profile_set_stop(struct tevent_req_profile *profile,
578 const char *stop_location,
579 struct timeval stop_time)
581 profile->stop_time = stop_time;
583 profile->stop_location = talloc_strdup(profile, stop_location);
584 return (profile->stop_location != NULL);
587 void tevent_req_profile_set_status(struct tevent_req_profile *profile,
588 pid_t pid,
589 enum tevent_req_state state,
590 uint64_t user_error)
592 profile->pid = pid;
593 profile->state = state;
594 profile->user_error = user_error;
597 void tevent_req_profile_append_sub(struct tevent_req_profile *parent_profile,
598 struct tevent_req_profile **sub_profile)
600 struct tevent_req_profile *sub;
602 sub = talloc_move(parent_profile, sub_profile);
604 sub->parent = parent_profile;
605 DLIST_ADD_END(parent_profile->subprofiles, sub);