/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"

char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
        return talloc_asprintf(mem_ctx,
                               "tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
                               " state[%s (%p)] timer[%p]",
                               req, req->internal.create_location,
                               req->internal.state,
                               (unsigned long long)req->internal.error,
                               (unsigned long long)req->internal.error,
                               talloc_get_name(req->data),
                               req->data,
                               req->internal.timer);
}

char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req)
{
        if (!req->private_print) {
                return tevent_req_default_print(req, mem_ctx);
        }

        return req->private_print(req, mem_ctx);
}

static int tevent_req_destructor(struct tevent_req *req);

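/*
 * Create a new async request together with its caller-specific state.
 *
 * The request, its trigger immediate and the private state are carved
 * out of a single talloc pool, so a request typically needs only one
 * underlying allocation.  The private state is returned to the caller
 * via *pdata and is also reachable through tevent_req_data().
 *
 * Illustrative sketch of a caller-side _send/_recv pair built on these
 * helpers (foo_state, foo_send and foo_recv are hypothetical names, not
 * part of tevent):
 *
 *   struct foo_state {
 *           int result;
 *   };
 *
 *   struct tevent_req *foo_send(TALLOC_CTX *mem_ctx,
 *                               struct tevent_context *ev)
 *   {
 *           struct tevent_req *req;
 *           struct foo_state *state;
 *
 *           req = tevent_req_create(mem_ctx, &state, struct foo_state);
 *           if (req == NULL) {
 *                   return NULL;
 *           }
 *           ... kick off the async work, arranging for
 *               tevent_req_done(req) or tevent_req_error(req, err)
 *               to be called when it completes ...
 *           return req;
 *   }
 *
 *   int foo_recv(struct tevent_req *req, int *result)
 *   {
 *           struct foo_state *state =
 *                   tevent_req_data(req, struct foo_state);
 *           enum tevent_req_state s;
 *           uint64_t err;
 *
 *           if (tevent_req_is_error(req, &s, &err)) {
 *                   tevent_req_received(req);
 *                   return (int)err;
 *           }
 *           *result = state->result;
 *           tevent_req_received(req);
 *           return 0;
 *   }
 */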
struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
                                      void *pdata,
                                      size_t data_size,
                                      const char *type,
                                      const char *location)
{
        struct tevent_req *req;
        void **ppdata = (void **)pdata;
        void *data;

        req = talloc_pooled_object(
                mem_ctx, struct tevent_req, 2,
                sizeof(struct tevent_immediate) + data_size);
        if (req == NULL) {
                return NULL;
        }
        ZERO_STRUCTP(req);
        req->internal.private_type = type;
        req->internal.create_location = location;
        req->internal.state = TEVENT_REQ_IN_PROGRESS;
        req->internal.trigger = tevent_create_immediate(req);
        if (!req->internal.trigger) {
                talloc_free(req);
                return NULL;
        }

        data = talloc_zero_size(req, data_size);
        if (data == NULL) {
                talloc_free(req);
                return NULL;
        }
        talloc_set_name_const(data, type);

        req->data = data;

        talloc_set_destructor(req, tevent_req_destructor);

        *ppdata = data;
        return req;
}

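/*
 * Destructor installed on every request: talloc_free() on a request
 * that was never received is treated like tevent_req_received().
 */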
static int tevent_req_destructor(struct tevent_req *req)
{
        tevent_req_received(req);
        return 0;
}

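/*
 * Notify the caller that the request has finished.  If
 * tevent_req_defer_callback() was used, the notification is rescheduled
 * through an immediate event instead of calling the callback directly.
 */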
void _tevent_req_notify_callback(struct tevent_req *req, const char *location)
{
        req->internal.finish_location = location;
        if (req->internal.defer_callback_ev) {
                (void)tevent_req_post(req, req->internal.defer_callback_ev);
                req->internal.defer_callback_ev = NULL;
                return;
        }
        if (req->async.fn != NULL) {
                req->async.fn(req);
        }
}

static void tevent_req_cleanup(struct tevent_req *req)
{
        if (req->private_cleanup.fn == NULL) {
                return;
        }

        if (req->private_cleanup.state >= req->internal.state) {
                /*
                 * Don't call the cleanup_function multiple times for the same
                 * state recursively
                 */
                return;
        }

        req->private_cleanup.state = req->internal.state;
        req->private_cleanup.fn(req, req->internal.state);
}

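/*
 * Central state transition: every way of finishing a request
 * (done, user error, out of memory, timeout) ends up here.
 */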
static void tevent_req_finish(struct tevent_req *req,
                              enum tevent_req_state state,
                              const char *location)
{
        /*
         * make sure we do not timeout after
         * the request was already finished
         */
        TALLOC_FREE(req->internal.timer);

        req->internal.state = state;
        req->internal.finish_location = location;

        tevent_req_cleanup(req);

        _tevent_req_notify_callback(req, location);
}

void _tevent_req_done(struct tevent_req *req,
                      const char *location)
{
        tevent_req_finish(req, TEVENT_REQ_DONE, location);
}

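/*
 * Fail the request with a caller-defined error code.  A value of 0 is
 * rejected, so that callers can write
 *
 *   if (tevent_req_error(req, ret)) {
 *           return;
 *   }
 *
 * and simply fall through on success (ret == 0).
 */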
bool _tevent_req_error(struct tevent_req *req,
                       uint64_t error,
                       const char *location)
{
        if (error == 0) {
                return false;
        }

        req->internal.error = error;
        tevent_req_finish(req, TEVENT_REQ_USER_ERROR, location);
        return true;
}

void _tevent_req_oom(struct tevent_req *req, const char *location)
{
        tevent_req_finish(req, TEVENT_REQ_NO_MEMORY, location);
}

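/*
 * Convenience wrapper around _tevent_req_oom(): finishes the request
 * with TEVENT_REQ_NO_MEMORY if the given pointer is NULL.  Commonly
 * used as
 *
 *   if (tevent_req_nomem(ptr, req)) {
 *           return tevent_req_post(req, ev);
 *   }
 */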
bool _tevent_req_nomem(const void *p,
                       struct tevent_req *req,
                       const char *location)
{
        if (p != NULL) {
                return false;
        }
        _tevent_req_oom(req, location);
        return true;
}

/**
 * @internal
 *
 * @brief Immediate event callback.
 *
 * @param[in]  ev            The event context to use.
 *
 * @param[in]  im            The immediate event.
 *
 * @param[in]  private_data  The async request to be finished.
 */
static void tevent_req_trigger(struct tevent_context *ev,
                               struct tevent_immediate *im,
                               void *private_data)
{
        struct tevent_req *req =
                talloc_get_type_abort(private_data,
                struct tevent_req);

        tevent_req_finish(req, req->internal.state,
                          req->internal.finish_location);
}

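/*
 * Deliver the (already set) finish state from the next event loop
 * iteration instead of immediately.  This lets a _send function that
 * completes or fails synchronously return to its caller before the
 * caller's callback fires.
 */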
struct tevent_req *tevent_req_post(struct tevent_req *req,
                                   struct tevent_context *ev)
{
        tevent_schedule_immediate(req->internal.trigger,
                                  ev, tevent_req_trigger, req);
        return req;
}

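/*
 * Arrange for a later finish of this request to notify the caller via
 * an immediate event on the given event context, rather than by calling
 * the callback directly from within tevent_req_done()/error().
 */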
void tevent_req_defer_callback(struct tevent_req *req,
                               struct tevent_context *ev)
{
        req->internal.defer_callback_ev = ev;
}

bool tevent_req_is_in_progress(struct tevent_req *req)
{
        if (req->internal.state == TEVENT_REQ_IN_PROGRESS) {
                return true;
        }

        return false;
}

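/*
 * Called from the _recv function (and from the destructor) once the
 * caller has collected the result: drop the destructor, the trigger,
 * the timer and the private state.
 */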
void tevent_req_received(struct tevent_req *req)
{
        talloc_set_destructor(req, NULL);

        req->private_print = NULL;
        req->private_cancel = NULL;

        TALLOC_FREE(req->internal.trigger);
        TALLOC_FREE(req->internal.timer);

        req->internal.state = TEVENT_REQ_RECEIVED;

        tevent_req_cleanup(req);

        TALLOC_FREE(req->data);
}

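/*
 * Run the event loop until the request has finished.  Returns false if
 * tevent_loop_once() fails, true once the request is no longer in
 * progress.
 */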
bool tevent_req_poll(struct tevent_req *req,
                     struct tevent_context *ev)
{
        while (tevent_req_is_in_progress(req)) {
                int ret;

                ret = tevent_loop_once(ev);
                if (ret != 0) {
                        return false;
                }
        }

        return true;
}

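/*
 * Returns false if the request completed successfully.  Otherwise the
 * final state is reported via *state and, for TEVENT_REQ_USER_ERROR,
 * the caller-defined error code via *error.
 */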
bool tevent_req_is_error(struct tevent_req *req, enum tevent_req_state *state,
                         uint64_t *error)
{
        if (req->internal.state == TEVENT_REQ_DONE) {
                return false;
        }
        if (req->internal.state == TEVENT_REQ_USER_ERROR) {
                *error = req->internal.error;
        }
        *state = req->internal.state;
        return true;
}

static void tevent_req_timedout(struct tevent_context *ev,
                                struct tevent_timer *te,
                                struct timeval now,
                                void *private_data)
{
        struct tevent_req *req =
                talloc_get_type_abort(private_data,
                struct tevent_req);

        TALLOC_FREE(req->internal.timer);

        tevent_req_finish(req, TEVENT_REQ_TIMED_OUT, __FUNCTION__);
}

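/*
 * Install (or replace) a timeout: if the request is still in progress
 * at 'endtime', it is finished with TEVENT_REQ_TIMED_OUT.
 */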
bool tevent_req_set_endtime(struct tevent_req *req,
                            struct tevent_context *ev,
                            struct timeval endtime)
{
        TALLOC_FREE(req->internal.timer);

        req->internal.timer = tevent_add_timer(ev, req, endtime,
                                               tevent_req_timedout,
                                               req);
        if (tevent_req_nomem(req->internal.timer, req)) {
                return false;
        }

        return true;
}

void tevent_req_set_callback(struct tevent_req *req, tevent_req_fn fn, void *pvt)
{
        req->async.fn = fn;
        req->async.private_data = pvt;
}

void *_tevent_req_callback_data(struct tevent_req *req)
{
        return req->async.private_data;
}

void *_tevent_req_data(struct tevent_req *req)
{
        return req->data;
}

void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn)
{
        req->private_print = fn;
}

void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn)
{
        req->private_cancel = fn;
}

bool _tevent_req_cancel(struct tevent_req *req, const char *location)
{
        if (req->private_cancel == NULL) {
                return false;
        }

        return req->private_cancel(req);
}

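/*
 * Register a cleanup function.  It runs when the request reaches a new
 * state (on finish and again on receive), at most once per state.
 */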
void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn)
{
        req->private_cleanup.state = req->internal.state;
        req->private_cleanup.fn = fn;
}