/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
27 #include "tevent_internal.h"
28 #include "tevent_util.h"
30 char *tevent_req_default_print(struct tevent_req
*req
, TALLOC_CTX
*mem_ctx
)
32 return talloc_asprintf(mem_ctx
,
33 "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
34 " state[%s (%p)] timer[%p]",
35 req
, req
->internal
.create_location
,
37 (unsigned long long)req
->internal
.error
,
38 (unsigned long long)req
->internal
.error
,
39 talloc_get_name(req
->data
),
45 char *tevent_req_print(TALLOC_CTX
*mem_ctx
, struct tevent_req
*req
)
47 if (!req
->private_print
) {
48 return tevent_req_default_print(req
, mem_ctx
);
51 return req
->private_print(req
, mem_ctx
);
54 static int tevent_req_destructor(struct tevent_req
*req
);
56 struct tevent_req
*_tevent_req_create(TALLOC_CTX
*mem_ctx
,
62 struct tevent_req
*req
;
63 void **ppdata
= (void **)pdata
;
67 payload
= sizeof(struct tevent_immediate
) + data_size
;
68 if (payload
< sizeof(struct tevent_immediate
)) {
73 req
= talloc_pooled_object(
74 mem_ctx
, struct tevent_req
, 2,
75 sizeof(struct tevent_immediate
) + data_size
);
80 *req
= (struct tevent_req
) {
81 .internal
.private_type
= type
,
82 .internal
.create_location
= location
,
83 .internal
.state
= TEVENT_REQ_IN_PROGRESS
,
84 .internal
.trigger
= tevent_create_immediate(req
)
87 data
= talloc_zero_size(req
, data_size
);
90 * No need to check for req->internal.trigger!=NULL or
91 * data!=NULL, this can't fail: talloc_pooled_object has
92 * already allocated sufficient memory.
95 talloc_set_name_const(data
, type
);
99 talloc_set_destructor(req
, tevent_req_destructor
);
/*
 * Talloc destructor: make sure a request that is freed behind our back
 * is torn down exactly as if the caller had received it.
 */
static int tevent_req_destructor(struct tevent_req *req)
{
	tevent_req_received(req);
	return 0;
}
111 void _tevent_req_notify_callback(struct tevent_req
*req
, const char *location
)
113 req
->internal
.finish_location
= location
;
114 if (req
->internal
.defer_callback_ev
) {
115 (void)tevent_req_post(req
, req
->internal
.defer_callback_ev
);
116 req
->internal
.defer_callback_ev
= NULL
;
119 if (req
->async
.fn
!= NULL
) {
124 static void tevent_req_cleanup(struct tevent_req
*req
)
126 if (req
->private_cleanup
.fn
== NULL
) {
130 if (req
->private_cleanup
.state
>= req
->internal
.state
) {
132 * Don't call the cleanup_function multiple times for the same
138 req
->private_cleanup
.state
= req
->internal
.state
;
139 req
->private_cleanup
.fn(req
, req
->internal
.state
);
142 static void tevent_req_finish(struct tevent_req
*req
,
143 enum tevent_req_state state
,
144 const char *location
)
147 * make sure we do not timeout after
148 * the request was already finished
150 TALLOC_FREE(req
->internal
.timer
);
152 req
->internal
.state
= state
;
153 req
->internal
.finish_location
= location
;
155 tevent_req_cleanup(req
);
157 _tevent_req_notify_callback(req
, location
);
160 void _tevent_req_done(struct tevent_req
*req
,
161 const char *location
)
163 tevent_req_finish(req
, TEVENT_REQ_DONE
, location
);
166 bool _tevent_req_error(struct tevent_req
*req
,
168 const char *location
)
174 req
->internal
.error
= error
;
175 tevent_req_finish(req
, TEVENT_REQ_USER_ERROR
, location
);
179 void _tevent_req_oom(struct tevent_req
*req
, const char *location
)
181 tevent_req_finish(req
, TEVENT_REQ_NO_MEMORY
, location
);
/*
 * Convenience allocation check: if p is NULL, fail the request with
 * TEVENT_REQ_NO_MEMORY and report true ("out of memory happened").
 */
bool _tevent_req_nomem(const void *p,
		       struct tevent_req *req,
		       const char *location)
{
	if (p != NULL) {
		return false;
	}

	_tevent_req_oom(req, location);
	return true;
}
198 * @brief Immediate event callback.
200 * @param[in] ev The event context to use.
202 * @param[in] im The immediate event.
204 * @param[in] priv The async request to be finished.
206 static void tevent_req_trigger(struct tevent_context
*ev
,
207 struct tevent_immediate
*im
,
210 struct tevent_req
*req
=
211 talloc_get_type_abort(private_data
,
214 tevent_req_finish(req
, req
->internal
.state
,
215 req
->internal
.finish_location
);
218 struct tevent_req
*tevent_req_post(struct tevent_req
*req
,
219 struct tevent_context
*ev
)
221 tevent_schedule_immediate(req
->internal
.trigger
,
222 ev
, tevent_req_trigger
, req
);
226 void tevent_req_defer_callback(struct tevent_req
*req
,
227 struct tevent_context
*ev
)
229 req
->internal
.defer_callback_ev
= ev
;
232 bool tevent_req_is_in_progress(struct tevent_req
*req
)
234 if (req
->internal
.state
== TEVENT_REQ_IN_PROGRESS
) {
241 void tevent_req_received(struct tevent_req
*req
)
243 talloc_set_destructor(req
, NULL
);
245 req
->private_print
= NULL
;
246 req
->private_cancel
= NULL
;
248 TALLOC_FREE(req
->internal
.trigger
);
249 TALLOC_FREE(req
->internal
.timer
);
251 req
->internal
.state
= TEVENT_REQ_RECEIVED
;
253 tevent_req_cleanup(req
);
255 TALLOC_FREE(req
->data
);
/*
 * Synchronously drive the event loop until the request leaves
 * TEVENT_REQ_IN_PROGRESS.
 *
 * @return true when the request finished, false if the event loop
 *         reported an error.
 */
bool tevent_req_poll(struct tevent_req *req,
		     struct tevent_context *ev)
{
	while (tevent_req_is_in_progress(req)) {
		int ret;

		ret = tevent_loop_once(ev);
		if (ret != 0) {
			return false;
		}
	}

	return true;
}
273 bool tevent_req_is_error(struct tevent_req
*req
, enum tevent_req_state
*state
,
276 if (req
->internal
.state
== TEVENT_REQ_DONE
) {
279 if (req
->internal
.state
== TEVENT_REQ_USER_ERROR
) {
280 *error
= req
->internal
.error
;
282 *state
= req
->internal
.state
;
286 static void tevent_req_timedout(struct tevent_context
*ev
,
287 struct tevent_timer
*te
,
291 struct tevent_req
*req
=
292 talloc_get_type_abort(private_data
,
295 TALLOC_FREE(req
->internal
.timer
);
297 tevent_req_finish(req
, TEVENT_REQ_TIMED_OUT
, __FUNCTION__
);
300 bool tevent_req_set_endtime(struct tevent_req
*req
,
301 struct tevent_context
*ev
,
302 struct timeval endtime
)
304 TALLOC_FREE(req
->internal
.timer
);
306 req
->internal
.timer
= tevent_add_timer(ev
, req
, endtime
,
309 if (tevent_req_nomem(req
->internal
.timer
, req
)) {
316 void tevent_req_reset_endtime(struct tevent_req
*req
)
318 TALLOC_FREE(req
->internal
.timer
);
321 void tevent_req_set_callback(struct tevent_req
*req
, tevent_req_fn fn
, void *pvt
)
324 req
->async
.private_data
= pvt
;
327 void *_tevent_req_callback_data(struct tevent_req
*req
)
329 return req
->async
.private_data
;
332 void *_tevent_req_data(struct tevent_req
*req
)
337 void tevent_req_set_print_fn(struct tevent_req
*req
, tevent_req_print_fn fn
)
339 req
->private_print
= fn
;
342 void tevent_req_set_cancel_fn(struct tevent_req
*req
, tevent_req_cancel_fn fn
)
344 req
->private_cancel
= fn
;
347 bool _tevent_req_cancel(struct tevent_req
*req
, const char *location
)
349 if (req
->private_cancel
== NULL
) {
353 return req
->private_cancel(req
);
356 void tevent_req_set_cleanup_fn(struct tevent_req
*req
, tevent_req_cleanup_fn fn
)
358 req
->private_cleanup
.state
= req
->internal
.state
;
359 req
->private_cleanup
.fn
= fn
;