/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
/*
 * The public headers wrap these functions in macros that capture the
 * callback's name for call-flow tracing; undefine them here so this
 * file can define the real functions.
 */
#undef tevent_req_set_callback
#undef tevent_req_set_cancel_fn
#undef tevent_req_set_cleanup_fn
34 char *tevent_req_default_print(struct tevent_req
*req
, TALLOC_CTX
*mem_ctx
)
36 return talloc_asprintf(mem_ctx
,
37 "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
38 " state[%s (%p)] timer[%p] finish[%s]",
39 req
, req
->internal
.create_location
,
41 (unsigned long long)req
->internal
.error
,
42 (unsigned long long)req
->internal
.error
,
43 req
->internal
.private_type
,
46 req
->internal
.finish_location
50 char *tevent_req_print(TALLOC_CTX
*mem_ctx
, struct tevent_req
*req
)
53 return talloc_strdup(mem_ctx
, "tevent_req[NULL]");
56 if (!req
->private_print
) {
57 return tevent_req_default_print(req
, mem_ctx
);
60 return req
->private_print(req
, mem_ctx
);
63 static int tevent_req_destructor(struct tevent_req
*req
);
65 struct tevent_req
*_tevent_req_create(TALLOC_CTX
*mem_ctx
,
71 return __tevent_req_create(mem_ctx
,
79 struct tevent_req
*__tevent_req_create(TALLOC_CTX
*mem_ctx
,
86 struct tevent_req
*req
;
87 struct tevent_req
*parent
;
88 void **ppdata
= (void **)pdata
;
92 payload
= sizeof(struct tevent_immediate
) + data_size
;
93 if (payload
< sizeof(struct tevent_immediate
)) {
98 req
= talloc_pooled_object(
99 mem_ctx
, struct tevent_req
, 2,
100 sizeof(struct tevent_immediate
) + data_size
);
105 *req
= (struct tevent_req
) {
107 .private_type
= type
,
108 .create_location
= location
,
109 .state
= TEVENT_REQ_IN_PROGRESS
,
110 .trigger
= tevent_create_immediate(req
),
114 data
= talloc_zero_size(req
, data_size
);
117 * No need to check for req->internal.trigger!=NULL or
118 * data!=NULL, this can't fail: talloc_pooled_object has
119 * already allocated sufficient memory.
122 talloc_set_name_const(data
, type
);
126 talloc_set_destructor(req
, tevent_req_destructor
);
128 parent
= talloc_get_type(talloc_parent(mem_ctx
), struct tevent_req
);
129 if ((parent
!= NULL
) && (parent
->internal
.profile
!= NULL
)) {
130 bool ok
= tevent_req_set_profile(req
);
136 req
->internal
.profile
->parent
= parent
->internal
.profile
;
137 DLIST_ADD_END(parent
->internal
.profile
->subprofiles
,
138 req
->internal
.profile
);
143 /* Initially, talloc_zero_size() sets internal.call_depth to 0 */
144 if (parent
!= NULL
) {
145 req
->internal
.call_depth
= parent
->internal
.call_depth
+ 1;
147 tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_CREATE
,
149 req
->internal
.call_depth
,
/*
 * talloc destructor: a request freed while still alive is treated as
 * received, so cleanup hooks run and child resources are released.
 */
static int tevent_req_destructor(struct tevent_req *req)
{
	tevent_req_received(req);
	return 0;
}
161 void _tevent_req_notify_callback(struct tevent_req
*req
, const char *location
)
163 req
->internal
.finish_location
= location
;
164 if (req
->internal
.defer_callback_ev
) {
165 (void)tevent_req_post(req
, req
->internal
.defer_callback_ev
);
166 req
->internal
.defer_callback_ev
= NULL
;
169 if (req
->async
.fn
!= NULL
) {
170 /* Calling back the parent code, decrement the call depth. */
171 size_t new_depth
= req
->internal
.call_depth
> 0 ?
172 req
->internal
.call_depth
- 1 : 0;
173 tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_NOTIFY_CB
,
181 static void tevent_req_cleanup(struct tevent_req
*req
)
183 if (req
->private_cleanup
.state
>= req
->internal
.state
) {
185 * Don't call the cleanup_function multiple times for the same
191 tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_CLEANUP
,
193 req
->internal
.call_depth
,
194 req
->private_cleanup
.fn_name
);
196 if (req
->private_cleanup
.fn
== NULL
) {
200 req
->private_cleanup
.state
= req
->internal
.state
;
201 req
->private_cleanup
.fn(req
, req
->internal
.state
);
204 static void tevent_req_finish(struct tevent_req
*req
,
205 enum tevent_req_state state
,
206 const char *location
)
208 struct tevent_req_profile
*p
;
210 * make sure we do not timeout after
211 * the request was already finished
213 TALLOC_FREE(req
->internal
.timer
);
215 req
->internal
.state
= state
;
216 req
->internal
.finish_location
= location
;
218 tevent_req_cleanup(req
);
220 p
= req
->internal
.profile
;
223 p
->stop_location
= location
;
224 p
->stop_time
= tevent_timeval_current();
226 p
->user_error
= req
->internal
.error
;
228 if (p
->parent
!= NULL
) {
229 talloc_steal(p
->parent
, p
);
230 req
->internal
.profile
= NULL
;
234 _tevent_req_notify_callback(req
, location
);
237 void _tevent_req_done(struct tevent_req
*req
,
238 const char *location
)
240 tevent_req_finish(req
, TEVENT_REQ_DONE
, location
);
243 bool _tevent_req_error(struct tevent_req
*req
,
245 const char *location
)
251 req
->internal
.error
= error
;
252 tevent_req_finish(req
, TEVENT_REQ_USER_ERROR
, location
);
256 void _tevent_req_oom(struct tevent_req
*req
, const char *location
)
258 tevent_req_finish(req
, TEVENT_REQ_NO_MEMORY
, location
);
261 bool _tevent_req_nomem(const void *p
,
262 struct tevent_req
*req
,
263 const char *location
)
268 _tevent_req_oom(req
, location
);
275 * @brief Immediate event callback.
277 * @param[in] ev The event context to use.
279 * @param[in] im The immediate event.
281 * @param[in] priv The async request to be finished.
283 static void tevent_req_trigger(struct tevent_context
*ev
,
284 struct tevent_immediate
*im
,
287 struct tevent_req
*req
=
288 talloc_get_type_abort(private_data
,
291 tevent_req_finish(req
, req
->internal
.state
,
292 req
->internal
.finish_location
);
295 struct tevent_req
*tevent_req_post(struct tevent_req
*req
,
296 struct tevent_context
*ev
)
298 tevent_schedule_immediate(req
->internal
.trigger
,
299 ev
, tevent_req_trigger
, req
);
303 void tevent_req_defer_callback(struct tevent_req
*req
,
304 struct tevent_context
*ev
)
306 req
->internal
.defer_callback_ev
= ev
;
309 bool tevent_req_is_in_progress(struct tevent_req
*req
)
311 if (req
->internal
.state
== TEVENT_REQ_IN_PROGRESS
) {
318 void tevent_req_received(struct tevent_req
*req
)
320 talloc_set_destructor(req
, NULL
);
322 req
->private_print
= NULL
;
323 req
->private_cancel
.fn
= NULL
;
324 req
->private_cancel
.fn_name
= NULL
;
326 TALLOC_FREE(req
->internal
.trigger
);
327 TALLOC_FREE(req
->internal
.timer
);
329 req
->internal
.state
= TEVENT_REQ_RECEIVED
;
331 tevent_req_cleanup(req
);
333 TALLOC_FREE(req
->data
);
336 bool tevent_req_poll(struct tevent_req
*req
,
337 struct tevent_context
*ev
)
339 while (tevent_req_is_in_progress(req
)) {
342 ret
= tevent_loop_once(ev
);
351 bool tevent_req_is_error(struct tevent_req
*req
, enum tevent_req_state
*state
,
354 if (req
->internal
.state
== TEVENT_REQ_DONE
) {
357 if (req
->internal
.state
== TEVENT_REQ_USER_ERROR
) {
358 *error
= req
->internal
.error
;
360 *state
= req
->internal
.state
;
364 static void tevent_req_timedout(struct tevent_context
*ev
,
365 struct tevent_timer
*te
,
369 struct tevent_req
*req
=
370 talloc_get_type_abort(private_data
,
373 TALLOC_FREE(req
->internal
.timer
);
375 tevent_req_finish(req
, TEVENT_REQ_TIMED_OUT
, __FUNCTION__
);
378 bool tevent_req_set_endtime(struct tevent_req
*req
,
379 struct tevent_context
*ev
,
380 struct timeval endtime
)
382 TALLOC_FREE(req
->internal
.timer
);
384 req
->internal
.timer
= tevent_add_timer(ev
, req
, endtime
,
387 if (tevent_req_nomem(req
->internal
.timer
, req
)) {
394 void tevent_req_reset_endtime(struct tevent_req
*req
)
396 TALLOC_FREE(req
->internal
.timer
);
399 void tevent_req_set_callback(struct tevent_req
*req
, tevent_req_fn fn
, void *pvt
)
401 return _tevent_req_set_callback(req
, fn
, NULL
, pvt
);
404 void _tevent_req_set_callback(struct tevent_req
*req
,
410 req
->async
.fn_name
= fn_name
;
411 req
->async
.private_data
= pvt
;
414 void *_tevent_req_callback_data(struct tevent_req
*req
)
416 return req
->async
.private_data
;
419 void *_tevent_req_data(struct tevent_req
*req
)
424 void tevent_req_set_print_fn(struct tevent_req
*req
, tevent_req_print_fn fn
)
426 req
->private_print
= fn
;
429 void tevent_req_set_cancel_fn(struct tevent_req
*req
, tevent_req_cancel_fn fn
)
431 _tevent_req_set_cancel_fn(req
, fn
, NULL
);
434 void _tevent_req_set_cancel_fn(struct tevent_req
*req
,
435 tevent_req_cancel_fn fn
,
438 req
->private_cancel
.fn
= fn
;
439 req
->private_cancel
.fn_name
= fn
!= NULL
? fn_name
: NULL
;
442 bool _tevent_req_cancel(struct tevent_req
*req
, const char *location
)
444 tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_CANCEL
,
446 req
->internal
.call_depth
,
447 req
->private_cancel
.fn_name
);
449 if (req
->private_cancel
.fn
== NULL
) {
453 return req
->private_cancel
.fn(req
);
456 void tevent_req_set_cleanup_fn(struct tevent_req
*req
, tevent_req_cleanup_fn fn
)
458 _tevent_req_set_cleanup_fn(req
, fn
, NULL
);
461 void _tevent_req_set_cleanup_fn(struct tevent_req
*req
,
462 tevent_req_cleanup_fn fn
,
465 req
->private_cleanup
.state
= req
->internal
.state
;
466 req
->private_cleanup
.fn
= fn
;
467 req
->private_cleanup
.fn_name
= fn
!= NULL
? fn_name
: NULL
;
470 static int tevent_req_profile_destructor(struct tevent_req_profile
*p
);
472 bool tevent_req_set_profile(struct tevent_req
*req
)
474 struct tevent_req_profile
*p
;
476 if (req
->internal
.profile
!= NULL
) {
477 tevent_req_error(req
, EINVAL
);
481 p
= tevent_req_profile_create(req
);
483 if (tevent_req_nomem(p
, req
)) {
487 p
->req_name
= talloc_get_name(req
->data
);
488 p
->start_location
= req
->internal
.create_location
;
489 p
->start_time
= tevent_timeval_current();
491 req
->internal
.profile
= p
;
496 static int tevent_req_profile_destructor(struct tevent_req_profile
*p
)
498 if (p
->parent
!= NULL
) {
499 DLIST_REMOVE(p
->parent
->subprofiles
, p
);
503 while (p
->subprofiles
!= NULL
) {
504 p
->subprofiles
->parent
= NULL
;
505 DLIST_REMOVE(p
->subprofiles
, p
->subprofiles
);
511 struct tevent_req_profile
*tevent_req_move_profile(struct tevent_req
*req
,
514 return talloc_move(mem_ctx
, &req
->internal
.profile
);
517 const struct tevent_req_profile
*tevent_req_get_profile(
518 struct tevent_req
*req
)
520 return req
->internal
.profile
;
523 void tevent_req_profile_get_name(const struct tevent_req_profile
*profile
,
524 const char **req_name
)
526 if (req_name
!= NULL
) {
527 *req_name
= profile
->req_name
;
531 void tevent_req_profile_get_start(const struct tevent_req_profile
*profile
,
532 const char **start_location
,
533 struct timeval
*start_time
)
535 if (start_location
!= NULL
) {
536 *start_location
= profile
->start_location
;
538 if (start_time
!= NULL
) {
539 *start_time
= profile
->start_time
;
543 void tevent_req_profile_get_stop(const struct tevent_req_profile
*profile
,
544 const char **stop_location
,
545 struct timeval
*stop_time
)
547 if (stop_location
!= NULL
) {
548 *stop_location
= profile
->stop_location
;
550 if (stop_time
!= NULL
) {
551 *stop_time
= profile
->stop_time
;
555 void tevent_req_profile_get_status(const struct tevent_req_profile
*profile
,
557 enum tevent_req_state
*state
,
558 uint64_t *user_error
)
564 *state
= profile
->state
;
566 if (user_error
!= NULL
) {
567 *user_error
= profile
->user_error
;
571 const struct tevent_req_profile
*tevent_req_profile_get_subprofiles(
572 const struct tevent_req_profile
*profile
)
574 return profile
->subprofiles
;
577 const struct tevent_req_profile
*tevent_req_profile_next(
578 const struct tevent_req_profile
*profile
)
580 return profile
->next
;
583 struct tevent_req_profile
*tevent_req_profile_create(TALLOC_CTX
*mem_ctx
)
585 struct tevent_req_profile
*result
;
587 result
= talloc_zero(mem_ctx
, struct tevent_req_profile
);
588 if (result
== NULL
) {
591 talloc_set_destructor(result
, tevent_req_profile_destructor
);
596 bool tevent_req_profile_set_name(struct tevent_req_profile
*profile
,
597 const char *req_name
)
599 profile
->req_name
= talloc_strdup(profile
, req_name
);
600 return (profile
->req_name
!= NULL
);
603 bool tevent_req_profile_set_start(struct tevent_req_profile
*profile
,
604 const char *start_location
,
605 struct timeval start_time
)
607 profile
->start_time
= start_time
;
609 profile
->start_location
= talloc_strdup(profile
, start_location
);
610 return (profile
->start_location
!= NULL
);
613 bool tevent_req_profile_set_stop(struct tevent_req_profile
*profile
,
614 const char *stop_location
,
615 struct timeval stop_time
)
617 profile
->stop_time
= stop_time
;
619 profile
->stop_location
= talloc_strdup(profile
, stop_location
);
620 return (profile
->stop_location
!= NULL
);
623 void tevent_req_profile_set_status(struct tevent_req_profile
*profile
,
625 enum tevent_req_state state
,
629 profile
->state
= state
;
630 profile
->user_error
= user_error
;
633 void tevent_req_profile_append_sub(struct tevent_req_profile
*parent_profile
,
634 struct tevent_req_profile
**sub_profile
)
636 struct tevent_req_profile
*sub
;
638 sub
= talloc_move(parent_profile
, sub_profile
);
640 sub
->parent
= parent_profile
;
641 DLIST_ADD_END(parent_profile
->subprofiles
, sub
);