lib/tevent/tevent_req.c
/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
	return talloc_asprintf(mem_ctx,
			       "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
			       " state[%s (%p)] timer[%p] finish[%s]",
			       req, req->internal.create_location,
			       req->internal.state,
			       (unsigned long long)req->internal.error,
			       (unsigned long long)req->internal.error,
			       req->internal.private_type,
			       req->data,
			       req->internal.timer,
			       req->internal.finish_location);
}

char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req)
{
	if (req == NULL) {
		return talloc_strdup(mem_ctx, "tevent_req[NULL]");
	}

	if (!req->private_print) {
		return tevent_req_default_print(req, mem_ctx);
	}

	return req->private_print(req, mem_ctx);
}
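/*
 * Sketch: how a caller might dump a pending request for debugging.
 * Only tevent_req_print() is real API here; mem_ctx and req come from
 * the surrounding code.
 *
 *	char *str = tevent_req_print(mem_ctx, req);
 *	if (str != NULL) {
 *		fprintf(stderr, "pending: %s\n", str);
 *		TALLOC_FREE(str);
 *	}
 */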
static int tevent_req_destructor(struct tevent_req *req);

struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
				      void *pdata,
				      size_t data_size,
				      const char *type,
				      const char *location)
{
	struct tevent_req *req;
	struct tevent_req *parent;
	void **ppdata = (void **)pdata;
	void *data;
	size_t payload;

	payload = sizeof(struct tevent_immediate) + data_size;
	if (payload < sizeof(struct tevent_immediate)) {
		/* overflow */
		return NULL;
	}

	req = talloc_pooled_object(
		mem_ctx, struct tevent_req, 2,
		sizeof(struct tevent_immediate) + data_size);
	if (req == NULL) {
		return NULL;
	}

	*req = (struct tevent_req) {
		.internal.private_type		= type,
		.internal.create_location	= location,
		.internal.state			= TEVENT_REQ_IN_PROGRESS,
		.internal.trigger		= tevent_create_immediate(req)
	};

	data = talloc_zero_size(req, data_size);

	/*
	 * No need to check for req->internal.trigger!=NULL or
	 * data!=NULL, this can't fail: talloc_pooled_object has
	 * already allocated sufficient memory.
	 */

	talloc_set_name_const(data, type);

	req->data = data;

	talloc_set_destructor(req, tevent_req_destructor);

	parent = talloc_get_type(talloc_parent(mem_ctx), struct tevent_req);
	if ((parent != NULL) && (parent->internal.profile != NULL)) {
		bool ok = tevent_req_set_profile(req);

		if (!ok) {
			TALLOC_FREE(req);
			return NULL;
		}
		req->internal.profile->parent = parent->internal.profile;
		DLIST_ADD_END(parent->internal.profile->subprofiles,
			      req->internal.profile);
	}

	*ppdata = data;
	return req;
}
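/*
 * Sketch of the canonical _send() pattern that feeds this function via
 * the tevent_req_create() wrapper macro; the echo_* names are
 * hypothetical.
 *
 *	struct echo_state {
 *		uint8_t *buf;
 *	};
 *
 *	struct tevent_req *echo_send(TALLOC_CTX *mem_ctx,
 *				     struct tevent_context *ev,
 *				     size_t len)
 *	{
 *		struct tevent_req *req;
 *		struct echo_state *state;
 *
 *		req = tevent_req_create(mem_ctx, &state, struct echo_state);
 *		if (req == NULL) {
 *			return NULL;
 *		}
 *		... kick off async work, keep track of it in *state ...
 *		return req;
 *	}
 */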
static int tevent_req_destructor(struct tevent_req *req)
{
	tevent_req_received(req);
	return 0;
}

void _tevent_req_notify_callback(struct tevent_req *req, const char *location)
{
	req->internal.finish_location = location;
	if (req->internal.defer_callback_ev) {
		(void)tevent_req_post(req, req->internal.defer_callback_ev);
		req->internal.defer_callback_ev = NULL;
		return;
	}
	if (req->async.fn != NULL) {
		req->async.fn(req);
	}
}
static void tevent_req_cleanup(struct tevent_req *req)
{
	if (req->private_cleanup.fn == NULL) {
		return;
	}

	if (req->private_cleanup.state >= req->internal.state) {
		/*
		 * Don't call the cleanup_function multiple times for the same
		 * state recursively
		 */
		return;
	}

	req->private_cleanup.state = req->internal.state;
	req->private_cleanup.fn(req, req->internal.state);
}

static void tevent_req_finish(struct tevent_req *req,
			      enum tevent_req_state state,
			      const char *location)
{
	struct tevent_req_profile *p;
	/*
	 * make sure we do not timeout after
	 * the request was already finished
	 */
	TALLOC_FREE(req->internal.timer);

	req->internal.state = state;
	req->internal.finish_location = location;

	tevent_req_cleanup(req);

	p = req->internal.profile;

	if (p != NULL) {
		p->stop_location = location;
		p->stop_time = tevent_timeval_current();
		p->state = state;
		p->user_error = req->internal.error;

		if (p->parent != NULL) {
			talloc_steal(p->parent, p);
			req->internal.profile = NULL;
		}
	}

	_tevent_req_notify_callback(req, location);
}
void _tevent_req_done(struct tevent_req *req,
		      const char *location)
{
	tevent_req_finish(req, TEVENT_REQ_DONE, location);
}

bool _tevent_req_error(struct tevent_req *req,
		       uint64_t error,
		       const char *location)
{
	if (error == 0) {
		return false;
	}

	req->internal.error = error;
	tevent_req_finish(req, TEVENT_REQ_USER_ERROR, location);
	return true;
}

void _tevent_req_oom(struct tevent_req *req, const char *location)
{
	tevent_req_finish(req, TEVENT_REQ_NO_MEMORY, location);
}

bool _tevent_req_nomem(const void *p,
		       struct tevent_req *req,
		       const char *location)
{
	if (p != NULL) {
		return false;
	}
	_tevent_req_oom(req, location);
	return true;
}
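/*
 * Sketch: the tevent_req_nomem() wrapper macro turns a failed
 * allocation inside a _send() function into a NO_MEMORY completion;
 * state, req and ev are as in the _send() sketch above.
 *
 *	state->buf = talloc_array(state, uint8_t, len);
 *	if (tevent_req_nomem(state->buf, req)) {
 *		return tevent_req_post(req, ev);
 *	}
 */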
/**
 * @internal
 *
 * @brief Immediate event callback.
 *
 * @param[in]  ev            The event context to use.
 *
 * @param[in]  im            The immediate event.
 *
 * @param[in]  private_data  The async request to be finished.
 */
static void tevent_req_trigger(struct tevent_context *ev,
			       struct tevent_immediate *im,
			       void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
		struct tevent_req);

	tevent_req_finish(req, req->internal.state,
			  req->internal.finish_location);
}

struct tevent_req *tevent_req_post(struct tevent_req *req,
				   struct tevent_context *ev)
{
	tevent_schedule_immediate(req->internal.trigger,
				  ev, tevent_req_trigger, req);
	return req;
}
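/*
 * Sketch: a _send() function that finishes before any event has been
 * waited for must not invoke the callback directly; it posts the
 * result so the caller gets a chance to set a callback first.
 * validate_args() is a hypothetical helper.
 *
 *	if (!validate_args(state)) {
 *		tevent_req_error(req, EINVAL);
 *		return tevent_req_post(req, ev);
 *	}
 *	return req;
 */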
void tevent_req_defer_callback(struct tevent_req *req,
			       struct tevent_context *ev)
{
	req->internal.defer_callback_ev = ev;
}
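/*
 * Sketch: deferring callbacks is useful when finishing many requests
 * from one place, e.g. failing everything on shutdown; the callbacks
 * then run from the event loop rather than deep inside this loop.
 * pop_pending() is a hypothetical helper.
 *
 *	while ((req = pop_pending(queue)) != NULL) {
 *		tevent_req_defer_callback(req, ev);
 *		tevent_req_error(req, ESHUTDOWN);
 *	}
 */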
bool tevent_req_is_in_progress(struct tevent_req *req)
{
	if (req->internal.state == TEVENT_REQ_IN_PROGRESS) {
		return true;
	}

	return false;
}
void tevent_req_received(struct tevent_req *req)
{
	talloc_set_destructor(req, NULL);

	req->private_print = NULL;
	req->private_cancel = NULL;

	TALLOC_FREE(req->internal.trigger);
	TALLOC_FREE(req->internal.timer);

	req->internal.state = TEVENT_REQ_RECEIVED;

	tevent_req_cleanup(req);

	TALLOC_FREE(req->data);
}
bool tevent_req_poll(struct tevent_req *req,
		     struct tevent_context *ev)
{
	while (tevent_req_is_in_progress(req)) {
		int ret;

		ret = tevent_loop_once(ev);
		if (ret != 0) {
			return false;
		}
	}

	return true;
}
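/*
 * Sketch: a synchronous wrapper around an async pair built with
 * tevent_req_poll(); echo_send()/echo_recv() are the hypothetical
 * pair from the sketches above. Allocating the request off ev means
 * the final TALLOC_FREE(ev) also releases the request.
 *
 *	int echo(TALLOC_CTX *mem_ctx, size_t len)
 *	{
 *		struct tevent_context *ev;
 *		struct tevent_req *req;
 *		int ret = ENOMEM;
 *
 *		ev = tevent_context_init(mem_ctx);
 *		if (ev == NULL) {
 *			return ret;
 *		}
 *		req = echo_send(ev, ev, len);
 *		if (req == NULL) {
 *			goto done;
 *		}
 *		if (!tevent_req_poll(req, ev)) {
 *			goto done;
 *		}
 *		ret = echo_recv(req);
 *	done:
 *		TALLOC_FREE(ev);
 *		return ret;
 *	}
 */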
bool tevent_req_is_error(struct tevent_req *req, enum tevent_req_state *state,
			 uint64_t *error)
{
	if (req->internal.state == TEVENT_REQ_DONE) {
		return false;
	}
	if (req->internal.state == TEVENT_REQ_USER_ERROR) {
		*error = req->internal.error;
	}
	*state = req->internal.state;
	return true;
}
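/*
 * Sketch of the matching _recv() side. Note that *error is only
 * filled in for TEVENT_REQ_USER_ERROR, so a generic error is returned
 * for the other failure states; echo_recv() is hypothetical.
 *
 *	int echo_recv(struct tevent_req *req)
 *	{
 *		enum tevent_req_state state;
 *		uint64_t err;
 *		int ret = 0;
 *
 *		if (tevent_req_is_error(req, &state, &err)) {
 *			ret = (state == TEVENT_REQ_USER_ERROR) ?
 *				(int)err : EIO;
 *		}
 *		tevent_req_received(req);
 *		return ret;
 *	}
 */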
static void tevent_req_timedout(struct tevent_context *ev,
				struct tevent_timer *te,
				struct timeval now,
				void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
		struct tevent_req);

	TALLOC_FREE(req->internal.timer);

	tevent_req_finish(req, TEVENT_REQ_TIMED_OUT, __FUNCTION__);
}

bool tevent_req_set_endtime(struct tevent_req *req,
			    struct tevent_context *ev,
			    struct timeval endtime)
{
	TALLOC_FREE(req->internal.timer);

	req->internal.timer = tevent_add_timer(ev, req, endtime,
					       tevent_req_timedout,
					       req);
	if (tevent_req_nomem(req->internal.timer, req)) {
		return false;
	}

	return true;
}
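/*
 * Sketch: arming a 10-second timeout from within a _send() function.
 * On allocation failure tevent_req_set_endtime() has already marked
 * the request NO_MEMORY via tevent_req_nomem(), so posting is all
 * that is left to do.
 *
 *	if (!tevent_req_set_endtime(req, ev,
 *				    tevent_timeval_current_ofs(10, 0))) {
 *		return tevent_req_post(req, ev);
 *	}
 */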
void tevent_req_reset_endtime(struct tevent_req *req)
{
	TALLOC_FREE(req->internal.timer);
}
void tevent_req_set_callback(struct tevent_req *req, tevent_req_fn fn, void *pvt)
{
	req->async.fn = fn;
	req->async.private_data = pvt;
}
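/*
 * Sketch: the usual way a parent request chains a subrequest, passing
 * itself as the callback's private data; subreq_send(), subreq_recv()
 * and echo_done() are hypothetical.
 *
 *	subreq = subreq_send(state, ev);
 *	if (tevent_req_nomem(subreq, req)) {
 *		return tevent_req_post(req, ev);
 *	}
 *	tevent_req_set_callback(subreq, echo_done, req);
 *
 *	static void echo_done(struct tevent_req *subreq)
 *	{
 *		struct tevent_req *req = tevent_req_callback_data(
 *			subreq, struct tevent_req);
 *
 *		... collect results with subreq_recv(subreq) ...
 *		TALLOC_FREE(subreq);
 *		tevent_req_done(req);
 *	}
 */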
void *_tevent_req_callback_data(struct tevent_req *req)
{
	return req->async.private_data;
}

void *_tevent_req_data(struct tevent_req *req)
{
	return req->data;
}

void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn)
{
	req->private_print = fn;
}

void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn)
{
	req->private_cancel = fn;
}
bool _tevent_req_cancel(struct tevent_req *req, const char *location)
{
	if (req->private_cancel == NULL) {
		return false;
	}

	return req->private_cancel(req);
}
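/*
 * Sketch: a request opts in to cancellation by registering a cancel
 * function; a caller then uses the tevent_req_cancel() macro. The
 * echo_* names and state->subreq are hypothetical.
 *
 *	static bool echo_cancel(struct tevent_req *req)
 *	{
 *		struct echo_state *state =
 *			tevent_req_data(req, struct echo_state);
 *
 *		TALLOC_FREE(state->subreq);
 *		tevent_req_error(req, ECANCELED);
 *		return true;
 *	}
 *
 *	In echo_send():  tevent_req_set_cancel_fn(req, echo_cancel);
 *	In the caller:   tevent_req_cancel(req);
 */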
void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn)
{
	req->private_cleanup.state = req->internal.state;
	req->private_cleanup.fn = fn;
}
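/*
 * Sketch: a cleanup function sees each state transition once and is a
 * good place to release resources that must not outlive the request;
 * the echo_* names and state->fde are hypothetical.
 *
 *	static void echo_cleanup(struct tevent_req *req,
 *				 enum tevent_req_state req_state)
 *	{
 *		struct echo_state *state =
 *			tevent_req_data(req, struct echo_state);
 *
 *		if (req_state == TEVENT_REQ_RECEIVED) {
 *			TALLOC_FREE(state->fde);
 *		}
 *	}
 *
 *	In echo_send():  tevent_req_set_cleanup_fn(req, echo_cleanup);
 */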
static int tevent_req_profile_destructor(struct tevent_req_profile *p);

bool tevent_req_set_profile(struct tevent_req *req)
{
	struct tevent_req_profile *p;

	if (req->internal.profile != NULL) {
		tevent_req_error(req, EINVAL);
		return false;
	}

	p = tevent_req_profile_create(req);

	if (tevent_req_nomem(p, req)) {
		return false;
	}

	p->req_name = talloc_get_name(req->data);
	p->start_location = req->internal.create_location;
	p->start_time = tevent_timeval_current();

	req->internal.profile = p;

	return true;
}

static int tevent_req_profile_destructor(struct tevent_req_profile *p)
{
	if (p->parent != NULL) {
		DLIST_REMOVE(p->parent->subprofiles, p);
		p->parent = NULL;
	}

	while (p->subprofiles != NULL) {
		p->subprofiles->parent = NULL;
		DLIST_REMOVE(p->subprofiles, p->subprofiles);
	}

	return 0;
}
struct tevent_req_profile *tevent_req_move_profile(struct tevent_req *req,
						   TALLOC_CTX *mem_ctx)
{
	return talloc_move(mem_ctx, &req->internal.profile);
}

const struct tevent_req_profile *tevent_req_get_profile(
	struct tevent_req *req)
{
	return req->internal.profile;
}

void tevent_req_profile_get_name(const struct tevent_req_profile *profile,
				 const char **req_name)
{
	if (req_name != NULL) {
		*req_name = profile->req_name;
	}
}

void tevent_req_profile_get_start(const struct tevent_req_profile *profile,
				  const char **start_location,
				  struct timeval *start_time)
{
	if (start_location != NULL) {
		*start_location = profile->start_location;
	}
	if (start_time != NULL) {
		*start_time = profile->start_time;
	}
}

void tevent_req_profile_get_stop(const struct tevent_req_profile *profile,
				 const char **stop_location,
				 struct timeval *stop_time)
{
	if (stop_location != NULL) {
		*stop_location = profile->stop_location;
	}
	if (stop_time != NULL) {
		*stop_time = profile->stop_time;
	}
}

void tevent_req_profile_get_status(const struct tevent_req_profile *profile,
				   pid_t *pid,
				   enum tevent_req_state *state,
				   uint64_t *user_error)
{
	if (pid != NULL) {
		*pid = profile->pid;
	}
	if (state != NULL) {
		*state = profile->state;
	}
	if (user_error != NULL) {
		*user_error = profile->user_error;
	}
}

const struct tevent_req_profile *tevent_req_profile_get_subprofiles(
	const struct tevent_req_profile *profile)
{
	return profile->subprofiles;
}

const struct tevent_req_profile *tevent_req_profile_next(
	const struct tevent_req_profile *profile)
{
	return profile->next;
}
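/*
 * Sketch: walking a finished request's profile tree with the getters
 * above; print_profile() is hypothetical, the tevent_req_profile_*
 * calls are real.
 *
 *	static void print_profile(const struct tevent_req_profile *p,
 *				  int depth)
 *	{
 *		const struct tevent_req_profile *sub;
 *		const char *name = NULL;
 *
 *		tevent_req_profile_get_name(p, &name);
 *		fprintf(stderr, "%*s%s\n", depth * 2, "", name);
 *
 *		for (sub = tevent_req_profile_get_subprofiles(p);
 *		     sub != NULL;
 *		     sub = tevent_req_profile_next(sub)) {
 *			print_profile(sub, depth + 1);
 *		}
 *	}
 */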
struct tevent_req_profile *tevent_req_profile_create(TALLOC_CTX *mem_ctx)
{
	struct tevent_req_profile *result;

	result = talloc_zero(mem_ctx, struct tevent_req_profile);
	if (result == NULL) {
		return NULL;
	}
	talloc_set_destructor(result, tevent_req_profile_destructor);

	return result;
}

bool tevent_req_profile_set_name(struct tevent_req_profile *profile,
				 const char *req_name)
{
	profile->req_name = talloc_strdup(profile, req_name);
	return (profile->req_name != NULL);
}

bool tevent_req_profile_set_start(struct tevent_req_profile *profile,
				  const char *start_location,
				  struct timeval start_time)
{
	profile->start_time = start_time;

	profile->start_location = talloc_strdup(profile, start_location);
	return (profile->start_location != NULL);
}

bool tevent_req_profile_set_stop(struct tevent_req_profile *profile,
				 const char *stop_location,
				 struct timeval stop_time)
{
	profile->stop_time = stop_time;

	profile->stop_location = talloc_strdup(profile, stop_location);
	return (profile->stop_location != NULL);
}

void tevent_req_profile_set_status(struct tevent_req_profile *profile,
				   pid_t pid,
				   enum tevent_req_state state,
				   uint64_t user_error)
{
	profile->pid = pid;
	profile->state = state;
	profile->user_error = user_error;
}

void tevent_req_profile_append_sub(struct tevent_req_profile *parent_profile,
				   struct tevent_req_profile **sub_profile)
{
	struct tevent_req_profile *sub;

	sub = talloc_move(parent_profile, sub_profile);

	sub->parent = parent_profile;
	DLIST_ADD_END(parent_profile->subprofiles, sub);
}
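/*
 * Sketch: the _set_*() helpers above make it possible to rebuild a
 * profile that was reported by some other component (for example
 * another process) and hang it under a local profile tree;
 * recv_profile() is a hypothetical unmarshalling helper.
 *
 *	struct tevent_req_profile *sub;
 *
 *	sub = recv_profile(parent_profile);
 *	if (sub != NULL) {
 *		tevent_req_profile_append_sub(parent_profile, &sub);
 *	}
 */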