2 Unix SMB/CIFS implementation.
3 Infrastructure for async requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
27 #include "tevent_internal.h"
28 #include "tevent_util.h"
30 char *tevent_req_default_print(struct tevent_req
*req
, TALLOC_CTX
*mem_ctx
)
32 return talloc_asprintf(mem_ctx
,
33 "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
34 " state[%s (%p)] timer[%p] finish[%s]",
35 req
, req
->internal
.create_location
,
37 (unsigned long long)req
->internal
.error
,
38 (unsigned long long)req
->internal
.error
,
39 req
->internal
.private_type
,
42 req
->internal
.finish_location
46 char *tevent_req_print(TALLOC_CTX
*mem_ctx
, struct tevent_req
*req
)
49 return talloc_strdup(mem_ctx
, "tevent_req[NULL]");
52 if (!req
->private_print
) {
53 return tevent_req_default_print(req
, mem_ctx
);
56 return req
->private_print(req
, mem_ctx
);
59 static int tevent_req_destructor(struct tevent_req
*req
);
61 struct tevent_req
*_tevent_req_create(TALLOC_CTX
*mem_ctx
,
67 struct tevent_req
*req
;
68 void **ppdata
= (void **)pdata
;
72 payload
= sizeof(struct tevent_immediate
) + data_size
;
73 if (payload
< sizeof(struct tevent_immediate
)) {
78 req
= talloc_pooled_object(
79 mem_ctx
, struct tevent_req
, 2,
80 sizeof(struct tevent_immediate
) + data_size
);
85 *req
= (struct tevent_req
) {
86 .internal
.private_type
= type
,
87 .internal
.create_location
= location
,
88 .internal
.state
= TEVENT_REQ_IN_PROGRESS
,
89 .internal
.trigger
= tevent_create_immediate(req
)
92 data
= talloc_zero_size(req
, data_size
);
95 * No need to check for req->internal.trigger!=NULL or
96 * data!=NULL, this can't fail: talloc_pooled_object has
97 * already allocated sufficient memory.
100 talloc_set_name_const(data
, type
);
104 talloc_set_destructor(req
, tevent_req_destructor
);
/* Talloc destructor: make sure a freed request is torn down cleanly. */
static int tevent_req_destructor(struct tevent_req *req)
{
	tevent_req_received(req);
	return 0;
}
116 void _tevent_req_notify_callback(struct tevent_req
*req
, const char *location
)
118 req
->internal
.finish_location
= location
;
119 if (req
->internal
.defer_callback_ev
) {
120 (void)tevent_req_post(req
, req
->internal
.defer_callback_ev
);
121 req
->internal
.defer_callback_ev
= NULL
;
124 if (req
->async
.fn
!= NULL
) {
129 static void tevent_req_cleanup(struct tevent_req
*req
)
131 if (req
->private_cleanup
.fn
== NULL
) {
135 if (req
->private_cleanup
.state
>= req
->internal
.state
) {
137 * Don't call the cleanup_function multiple times for the same
143 req
->private_cleanup
.state
= req
->internal
.state
;
144 req
->private_cleanup
.fn(req
, req
->internal
.state
);
147 static void tevent_req_finish(struct tevent_req
*req
,
148 enum tevent_req_state state
,
149 const char *location
)
152 * make sure we do not timeout after
153 * the request was already finished
155 TALLOC_FREE(req
->internal
.timer
);
157 req
->internal
.state
= state
;
158 req
->internal
.finish_location
= location
;
160 tevent_req_cleanup(req
);
162 _tevent_req_notify_callback(req
, location
);
165 void _tevent_req_done(struct tevent_req
*req
,
166 const char *location
)
168 tevent_req_finish(req
, TEVENT_REQ_DONE
, location
);
171 bool _tevent_req_error(struct tevent_req
*req
,
173 const char *location
)
179 req
->internal
.error
= error
;
180 tevent_req_finish(req
, TEVENT_REQ_USER_ERROR
, location
);
184 void _tevent_req_oom(struct tevent_req
*req
, const char *location
)
186 tevent_req_finish(req
, TEVENT_REQ_NO_MEMORY
, location
);
189 bool _tevent_req_nomem(const void *p
,
190 struct tevent_req
*req
,
191 const char *location
)
196 _tevent_req_oom(req
, location
);
203 * @brief Immediate event callback.
205 * @param[in] ev The event context to use.
207 * @param[in] im The immediate event.
209 * @param[in] priv The async request to be finished.
211 static void tevent_req_trigger(struct tevent_context
*ev
,
212 struct tevent_immediate
*im
,
215 struct tevent_req
*req
=
216 talloc_get_type_abort(private_data
,
219 tevent_req_finish(req
, req
->internal
.state
,
220 req
->internal
.finish_location
);
223 struct tevent_req
*tevent_req_post(struct tevent_req
*req
,
224 struct tevent_context
*ev
)
226 tevent_schedule_immediate(req
->internal
.trigger
,
227 ev
, tevent_req_trigger
, req
);
231 void tevent_req_defer_callback(struct tevent_req
*req
,
232 struct tevent_context
*ev
)
234 req
->internal
.defer_callback_ev
= ev
;
237 bool tevent_req_is_in_progress(struct tevent_req
*req
)
239 if (req
->internal
.state
== TEVENT_REQ_IN_PROGRESS
) {
246 void tevent_req_received(struct tevent_req
*req
)
248 talloc_set_destructor(req
, NULL
);
250 req
->private_print
= NULL
;
251 req
->private_cancel
= NULL
;
253 TALLOC_FREE(req
->internal
.trigger
);
254 TALLOC_FREE(req
->internal
.timer
);
256 req
->internal
.state
= TEVENT_REQ_RECEIVED
;
258 tevent_req_cleanup(req
);
260 TALLOC_FREE(req
->data
);
263 bool tevent_req_poll(struct tevent_req
*req
,
264 struct tevent_context
*ev
)
266 while (tevent_req_is_in_progress(req
)) {
269 ret
= tevent_loop_once(ev
);
278 bool tevent_req_is_error(struct tevent_req
*req
, enum tevent_req_state
*state
,
281 if (req
->internal
.state
== TEVENT_REQ_DONE
) {
284 if (req
->internal
.state
== TEVENT_REQ_USER_ERROR
) {
285 *error
= req
->internal
.error
;
287 *state
= req
->internal
.state
;
291 static void tevent_req_timedout(struct tevent_context
*ev
,
292 struct tevent_timer
*te
,
296 struct tevent_req
*req
=
297 talloc_get_type_abort(private_data
,
300 TALLOC_FREE(req
->internal
.timer
);
302 tevent_req_finish(req
, TEVENT_REQ_TIMED_OUT
, __FUNCTION__
);
305 bool tevent_req_set_endtime(struct tevent_req
*req
,
306 struct tevent_context
*ev
,
307 struct timeval endtime
)
309 TALLOC_FREE(req
->internal
.timer
);
311 req
->internal
.timer
= tevent_add_timer(ev
, req
, endtime
,
314 if (tevent_req_nomem(req
->internal
.timer
, req
)) {
321 void tevent_req_reset_endtime(struct tevent_req
*req
)
323 TALLOC_FREE(req
->internal
.timer
);
326 void tevent_req_set_callback(struct tevent_req
*req
, tevent_req_fn fn
, void *pvt
)
329 req
->async
.private_data
= pvt
;
332 void *_tevent_req_callback_data(struct tevent_req
*req
)
334 return req
->async
.private_data
;
337 void *_tevent_req_data(struct tevent_req
*req
)
342 void tevent_req_set_print_fn(struct tevent_req
*req
, tevent_req_print_fn fn
)
344 req
->private_print
= fn
;
347 void tevent_req_set_cancel_fn(struct tevent_req
*req
, tevent_req_cancel_fn fn
)
349 req
->private_cancel
= fn
;
352 bool _tevent_req_cancel(struct tevent_req
*req
, const char *location
)
354 if (req
->private_cancel
== NULL
) {
358 return req
->private_cancel(req
);
361 void tevent_req_set_cleanup_fn(struct tevent_req
*req
, tevent_req_cleanup_fn fn
)
363 req
->private_cleanup
.state
= req
->internal
.state
;
364 req
->private_cleanup
.fn
= fn
;