/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
27 #include "tevent_internal.h"
28 #include "tevent_util.h"
30 char *tevent_req_default_print(struct tevent_req
*req
, TALLOC_CTX
*mem_ctx
)
32 return talloc_asprintf(mem_ctx
,
33 "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
34 " state[%s (%p)] timer[%p] finish[%s]",
35 req
, req
->internal
.create_location
,
37 (unsigned long long)req
->internal
.error
,
38 (unsigned long long)req
->internal
.error
,
39 req
->internal
.private_type
,
42 req
->internal
.finish_location
46 char *tevent_req_print(TALLOC_CTX
*mem_ctx
, struct tevent_req
*req
)
49 return talloc_strdup(mem_ctx
, "tevent_req[NULL]");
52 if (!req
->private_print
) {
53 return tevent_req_default_print(req
, mem_ctx
);
56 return req
->private_print(req
, mem_ctx
);
59 static int tevent_req_destructor(struct tevent_req
*req
);
61 struct tevent_req
*_tevent_req_create(TALLOC_CTX
*mem_ctx
,
67 struct tevent_req
*req
;
68 struct tevent_req
*parent
;
69 void **ppdata
= (void **)pdata
;
73 payload
= sizeof(struct tevent_immediate
) + data_size
;
74 if (payload
< sizeof(struct tevent_immediate
)) {
79 req
= talloc_pooled_object(
80 mem_ctx
, struct tevent_req
, 2,
81 sizeof(struct tevent_immediate
) + data_size
);
86 *req
= (struct tevent_req
) {
87 .internal
.private_type
= type
,
88 .internal
.create_location
= location
,
89 .internal
.state
= TEVENT_REQ_IN_PROGRESS
,
90 .internal
.trigger
= tevent_create_immediate(req
)
93 data
= talloc_zero_size(req
, data_size
);
96 * No need to check for req->internal.trigger!=NULL or
97 * data!=NULL, this can't fail: talloc_pooled_object has
98 * already allocated sufficient memory.
101 talloc_set_name_const(data
, type
);
105 talloc_set_destructor(req
, tevent_req_destructor
);
107 parent
= talloc_get_type(talloc_parent(mem_ctx
), struct tevent_req
);
108 if ((parent
!= NULL
) && (parent
->internal
.profile
!= NULL
)) {
109 bool ok
= tevent_req_set_profile(req
);
115 req
->internal
.profile
->parent
= parent
->internal
.profile
;
116 DLIST_ADD_END(parent
->internal
.profile
->subprofiles
,
117 req
->internal
.profile
);
/* talloc destructor: mark the request as received before it goes away. */
static int tevent_req_destructor(struct tevent_req *req)
{
	tevent_req_received(req);
	return 0;
}
130 void _tevent_req_notify_callback(struct tevent_req
*req
, const char *location
)
132 req
->internal
.finish_location
= location
;
133 if (req
->internal
.defer_callback_ev
) {
134 (void)tevent_req_post(req
, req
->internal
.defer_callback_ev
);
135 req
->internal
.defer_callback_ev
= NULL
;
138 if (req
->async
.fn
!= NULL
) {
143 static void tevent_req_cleanup(struct tevent_req
*req
)
145 if (req
->private_cleanup
.fn
== NULL
) {
149 if (req
->private_cleanup
.state
>= req
->internal
.state
) {
151 * Don't call the cleanup_function multiple times for the same
157 req
->private_cleanup
.state
= req
->internal
.state
;
158 req
->private_cleanup
.fn(req
, req
->internal
.state
);
161 static void tevent_req_finish(struct tevent_req
*req
,
162 enum tevent_req_state state
,
163 const char *location
)
165 struct tevent_req_profile
*p
;
167 * make sure we do not timeout after
168 * the request was already finished
170 TALLOC_FREE(req
->internal
.timer
);
172 req
->internal
.state
= state
;
173 req
->internal
.finish_location
= location
;
175 tevent_req_cleanup(req
);
177 p
= req
->internal
.profile
;
180 p
->stop_location
= location
;
181 p
->stop_time
= tevent_timeval_current();
183 p
->user_error
= req
->internal
.error
;
185 if (p
->parent
!= NULL
) {
186 talloc_steal(p
->parent
, p
);
187 req
->internal
.profile
= NULL
;
191 _tevent_req_notify_callback(req
, location
);
194 void _tevent_req_done(struct tevent_req
*req
,
195 const char *location
)
197 tevent_req_finish(req
, TEVENT_REQ_DONE
, location
);
200 bool _tevent_req_error(struct tevent_req
*req
,
202 const char *location
)
208 req
->internal
.error
= error
;
209 tevent_req_finish(req
, TEVENT_REQ_USER_ERROR
, location
);
213 void _tevent_req_oom(struct tevent_req
*req
, const char *location
)
215 tevent_req_finish(req
, TEVENT_REQ_NO_MEMORY
, location
);
/*
 * Helper for allocation checks: if p is NULL, finish req with
 * TEVENT_REQ_NO_MEMORY and return true; otherwise return false.
 */
bool _tevent_req_nomem(const void *p,
		       struct tevent_req *req,
		       const char *location)
{
	if (p != NULL) {
		return false;
	}

	_tevent_req_oom(req, location);
	return true;
}
232 * @brief Immediate event callback.
234 * @param[in] ev The event context to use.
236 * @param[in] im The immediate event.
238 * @param[in] priv The async request to be finished.
240 static void tevent_req_trigger(struct tevent_context
*ev
,
241 struct tevent_immediate
*im
,
244 struct tevent_req
*req
=
245 talloc_get_type_abort(private_data
,
248 tevent_req_finish(req
, req
->internal
.state
,
249 req
->internal
.finish_location
);
252 struct tevent_req
*tevent_req_post(struct tevent_req
*req
,
253 struct tevent_context
*ev
)
255 tevent_schedule_immediate(req
->internal
.trigger
,
256 ev
, tevent_req_trigger
, req
);
260 void tevent_req_defer_callback(struct tevent_req
*req
,
261 struct tevent_context
*ev
)
263 req
->internal
.defer_callback_ev
= ev
;
266 bool tevent_req_is_in_progress(struct tevent_req
*req
)
268 if (req
->internal
.state
== TEVENT_REQ_IN_PROGRESS
) {
275 void tevent_req_received(struct tevent_req
*req
)
277 talloc_set_destructor(req
, NULL
);
279 req
->private_print
= NULL
;
280 req
->private_cancel
= NULL
;
282 TALLOC_FREE(req
->internal
.trigger
);
283 TALLOC_FREE(req
->internal
.timer
);
285 req
->internal
.state
= TEVENT_REQ_RECEIVED
;
287 tevent_req_cleanup(req
);
289 TALLOC_FREE(req
->data
);
/*
 * Run the event loop until req has finished. Returns false if
 * tevent_loop_once() fails.
 */
bool tevent_req_poll(struct tevent_req *req,
		     struct tevent_context *ev)
{
	while (tevent_req_is_in_progress(req)) {
		int ret;

		ret = tevent_loop_once(ev);
		if (ret != 0) {
			return false;
		}
	}

	return true;
}
307 bool tevent_req_is_error(struct tevent_req
*req
, enum tevent_req_state
*state
,
310 if (req
->internal
.state
== TEVENT_REQ_DONE
) {
313 if (req
->internal
.state
== TEVENT_REQ_USER_ERROR
) {
314 *error
= req
->internal
.error
;
316 *state
= req
->internal
.state
;
320 static void tevent_req_timedout(struct tevent_context
*ev
,
321 struct tevent_timer
*te
,
325 struct tevent_req
*req
=
326 talloc_get_type_abort(private_data
,
329 TALLOC_FREE(req
->internal
.timer
);
331 tevent_req_finish(req
, TEVENT_REQ_TIMED_OUT
, __FUNCTION__
);
334 bool tevent_req_set_endtime(struct tevent_req
*req
,
335 struct tevent_context
*ev
,
336 struct timeval endtime
)
338 TALLOC_FREE(req
->internal
.timer
);
340 req
->internal
.timer
= tevent_add_timer(ev
, req
, endtime
,
343 if (tevent_req_nomem(req
->internal
.timer
, req
)) {
350 void tevent_req_reset_endtime(struct tevent_req
*req
)
352 TALLOC_FREE(req
->internal
.timer
);
355 void tevent_req_set_callback(struct tevent_req
*req
, tevent_req_fn fn
, void *pvt
)
358 req
->async
.private_data
= pvt
;
361 void *_tevent_req_callback_data(struct tevent_req
*req
)
363 return req
->async
.private_data
;
366 void *_tevent_req_data(struct tevent_req
*req
)
371 void tevent_req_set_print_fn(struct tevent_req
*req
, tevent_req_print_fn fn
)
373 req
->private_print
= fn
;
376 void tevent_req_set_cancel_fn(struct tevent_req
*req
, tevent_req_cancel_fn fn
)
378 req
->private_cancel
= fn
;
381 bool _tevent_req_cancel(struct tevent_req
*req
, const char *location
)
383 if (req
->private_cancel
== NULL
) {
387 return req
->private_cancel(req
);
390 void tevent_req_set_cleanup_fn(struct tevent_req
*req
, tevent_req_cleanup_fn fn
)
392 req
->private_cleanup
.state
= req
->internal
.state
;
393 req
->private_cleanup
.fn
= fn
;
396 static int tevent_req_profile_destructor(struct tevent_req_profile
*p
);
398 bool tevent_req_set_profile(struct tevent_req
*req
)
400 struct tevent_req_profile
*p
;
402 if (req
->internal
.profile
!= NULL
) {
403 tevent_req_error(req
, EINVAL
);
407 p
= tevent_req_profile_create(req
);
409 if (tevent_req_nomem(p
, req
)) {
413 p
->req_name
= talloc_get_name(req
->data
);
414 p
->start_location
= req
->internal
.create_location
;
415 p
->start_time
= tevent_timeval_current();
417 req
->internal
.profile
= p
;
422 static int tevent_req_profile_destructor(struct tevent_req_profile
*p
)
424 if (p
->parent
!= NULL
) {
425 DLIST_REMOVE(p
->parent
->subprofiles
, p
);
429 while (p
->subprofiles
!= NULL
) {
430 p
->subprofiles
->parent
= NULL
;
431 DLIST_REMOVE(p
->subprofiles
, p
->subprofiles
);
437 struct tevent_req_profile
*tevent_req_move_profile(struct tevent_req
*req
,
440 return talloc_move(mem_ctx
, &req
->internal
.profile
);
443 const struct tevent_req_profile
*tevent_req_get_profile(
444 struct tevent_req
*req
)
446 return req
->internal
.profile
;
449 void tevent_req_profile_get_name(const struct tevent_req_profile
*profile
,
450 const char **req_name
)
452 if (req_name
!= NULL
) {
453 *req_name
= profile
->req_name
;
457 void tevent_req_profile_get_start(const struct tevent_req_profile
*profile
,
458 const char **start_location
,
459 struct timeval
*start_time
)
461 if (start_location
!= NULL
) {
462 *start_location
= profile
->start_location
;
464 if (start_time
!= NULL
) {
465 *start_time
= profile
->start_time
;
469 void tevent_req_profile_get_stop(const struct tevent_req_profile
*profile
,
470 const char **stop_location
,
471 struct timeval
*stop_time
)
473 if (stop_location
!= NULL
) {
474 *stop_location
= profile
->stop_location
;
476 if (stop_time
!= NULL
) {
477 *stop_time
= profile
->stop_time
;
481 void tevent_req_profile_get_status(const struct tevent_req_profile
*profile
,
483 enum tevent_req_state
*state
,
484 uint64_t *user_error
)
490 *state
= profile
->state
;
492 if (user_error
!= NULL
) {
493 *user_error
= profile
->user_error
;
497 const struct tevent_req_profile
*tevent_req_profile_get_subprofiles(
498 const struct tevent_req_profile
*profile
)
500 return profile
->subprofiles
;
503 const struct tevent_req_profile
*tevent_req_profile_next(
504 const struct tevent_req_profile
*profile
)
506 return profile
->next
;
509 struct tevent_req_profile
*tevent_req_profile_create(TALLOC_CTX
*mem_ctx
)
511 struct tevent_req_profile
*result
;
513 result
= talloc_zero(mem_ctx
, struct tevent_req_profile
);
514 if (result
== NULL
) {
517 talloc_set_destructor(result
, tevent_req_profile_destructor
);
522 bool tevent_req_profile_set_name(struct tevent_req_profile
*profile
,
523 const char *req_name
)
525 profile
->req_name
= talloc_strdup(profile
, req_name
);
526 return (profile
->req_name
!= NULL
);
529 bool tevent_req_profile_set_start(struct tevent_req_profile
*profile
,
530 const char *start_location
,
531 struct timeval start_time
)
533 profile
->start_time
= start_time
;
535 profile
->start_location
= talloc_strdup(profile
, start_location
);
536 return (profile
->start_location
!= NULL
);
539 bool tevent_req_profile_set_stop(struct tevent_req_profile
*profile
,
540 const char *stop_location
,
541 struct timeval stop_time
)
543 profile
->stop_time
= stop_time
;
545 profile
->stop_location
= talloc_strdup(profile
, stop_location
);
546 return (profile
->stop_location
!= NULL
);
549 void tevent_req_profile_set_status(struct tevent_req_profile
*profile
,
551 enum tevent_req_state state
,
555 profile
->state
= state
;
556 profile
->user_error
= user_error
;
559 void tevent_req_profile_append_sub(struct tevent_req_profile
*parent_profile
,
560 struct tevent_req_profile
**sub_profile
)
562 struct tevent_req_profile
*sub
;
564 sub
= talloc_move(parent_profile
, sub_profile
);
566 sub
->parent
= parent_profile
;
567 DLIST_ADD_END(parent_profile
->subprofiles
, sub
);