lib/tevent/tevent_queue.c
/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

   ** NOTE! The following LGPL license applies to the tevent
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"

struct tevent_queue_entry {
	struct tevent_queue_entry *prev, *next;
	struct tevent_queue *queue;

	bool triggered;

	struct tevent_req *req;
	struct tevent_context *ev;

	tevent_queue_trigger_fn_t trigger;
	void *private_data;
};

struct tevent_queue {
	const char *name;
	const char *location;

	bool running;
	struct tevent_immediate *immediate;

	size_t length;
	struct tevent_queue_entry *list;
};

static void tevent_queue_immediate_trigger(struct tevent_context *ev,
					   struct tevent_immediate *im,
					   void *private_data);

static int tevent_queue_entry_destructor(struct tevent_queue_entry *e)
{
	struct tevent_queue *q = e->queue;

	if (!q) {
		return 0;
	}

	DLIST_REMOVE(q->list, e);
	q->length--;

	if (!q->running) {
		return 0;
	}

	if (!q->list) {
		return 0;
	}

	if (q->list->triggered) {
		return 0;
	}

	/* removing this entry exposed a new untriggered head, kick it */
	tevent_schedule_immediate(q->immediate,
				  q->list->ev,
				  tevent_queue_immediate_trigger,
				  q);

	return 0;
}

static int tevent_queue_destructor(struct tevent_queue *q)
{
	q->running = false;

	while (q->list) {
		struct tevent_queue_entry *e = q->list;
		talloc_free(e);
	}

	return 0;
}

struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx,
					  const char *name,
					  const char *location)
{
	struct tevent_queue *queue;

	queue = talloc_zero(mem_ctx, struct tevent_queue);
	if (!queue) {
		return NULL;
	}

	queue->name = talloc_strdup(queue, name);
	if (!queue->name) {
		talloc_free(queue);
		return NULL;
	}
	queue->immediate = tevent_create_immediate(queue);
	if (!queue->immediate) {
		talloc_free(queue);
		return NULL;
	}

	queue->location = location;

	/* queue is running by default */
	queue->running = true;

	talloc_set_destructor(queue, tevent_queue_destructor);
	return queue;
}

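/*
 * Hedged usage sketch (not part of this file): creating a queue via the
 * public tevent_queue_create() wrapper from tevent.h, which passes
 * __location__ to _tevent_queue_create().  The names example_queue_setup
 * and "example_queue" are illustrative only.
 */
static struct tevent_queue *example_queue_setup(TALLOC_CTX *mem_ctx)
{
	struct tevent_queue *q;

	q = tevent_queue_create(mem_ctx, "example_queue");
	if (q == NULL) {
		return NULL;
	}

	/* the queue starts in the running state, see above */
	return q;
}
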
static void tevent_queue_immediate_trigger(struct tevent_context *ev,
					   struct tevent_immediate *im,
					   void *private_data)
{
	struct tevent_queue *q =
		talloc_get_type_abort(private_data,
				      struct tevent_queue);

	if (!q->running) {
		return;
	}

	if (!q->list) {
		return;
	}

	q->list->triggered = true;
	q->list->trigger(q->list->req, q->list->private_data);
}

static struct tevent_queue_entry *tevent_queue_add_internal(
					struct tevent_queue *queue,
					struct tevent_context *ev,
					struct tevent_req *req,
					tevent_queue_trigger_fn_t trigger,
					void *private_data,
					bool allow_direct)
{
	struct tevent_queue_entry *e;

	e = talloc_zero(req, struct tevent_queue_entry);
	if (e == NULL) {
		return NULL;
	}

	e->queue = queue;
	e->req = req;
	e->ev = ev;
	e->trigger = trigger;
	e->private_data = private_data;

	/*
	 * if there is no trigger, it is just a blocker
	 */
	if (trigger == NULL) {
		e->triggered = true;
	}

	if (queue->length > 0) {
		/*
		 * if there are already entries in the
		 * queue do not optimize.
		 */
		allow_direct = false;
	}

	if (req->async.fn != NULL) {
		/*
		 * If the caller wants to optimize for the
		 * empty queue case, call the trigger only
		 * if there is no callback defined for the
		 * request yet.
		 */
		allow_direct = false;
	}

	DLIST_ADD_END(queue->list, e);
	queue->length++;
	talloc_set_destructor(e, tevent_queue_entry_destructor);

	if (!queue->running) {
		return e;
	}

	if (queue->list->triggered) {
		return e;
	}

	/*
	 * If allowed we directly call the trigger
	 * avoiding possible delays caused by
	 * an immediate event.
	 */
	if (allow_direct) {
		queue->list->triggered = true;
		queue->list->trigger(queue->list->req,
				     queue->list->private_data);
		return e;
	}

	tevent_schedule_immediate(queue->immediate,
				  queue->list->ev,
				  tevent_queue_immediate_trigger,
				  queue);

	return e;
}

bool tevent_queue_add(struct tevent_queue *queue,
		      struct tevent_context *ev,
		      struct tevent_req *req,
		      tevent_queue_trigger_fn_t trigger,
		      void *private_data)
{
	struct tevent_queue_entry *e;

	e = tevent_queue_add_internal(queue, ev, req,
				      trigger, private_data, false);
	if (e == NULL) {
		return false;
	}

	return true;
}

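/*
 * Hedged sketch of the typical caller pattern: a request's _send()
 * function adds itself to a queue and starts its real work from the
 * trigger callback.  example_state, example_trigger and example_send
 * are hypothetical names, not part of the tevent API.
 */
struct example_state {
	struct tevent_context *ev;
};

static void example_trigger(struct tevent_req *req, void *private_data)
{
	struct example_state *state =
		talloc_get_type_abort(private_data, struct example_state);

	/*
	 * The queue has reached this request: issue the actual work here
	 * (e.g. a subrequest on state->ev) instead of finishing directly.
	 */
	tevent_req_done(req);
}

static struct tevent_req *example_send(TALLOC_CTX *mem_ctx,
				       struct tevent_context *ev,
				       struct tevent_queue *queue)
{
	struct tevent_req *req;
	struct example_state *state;

	req = tevent_req_create(mem_ctx, &state, struct example_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;

	if (!tevent_queue_add(queue, ev, req, example_trigger, state)) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}

	return req;
}
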
struct tevent_queue_entry *tevent_queue_add_entry(
					struct tevent_queue *queue,
					struct tevent_context *ev,
					struct tevent_req *req,
					tevent_queue_trigger_fn_t trigger,
					void *private_data)
{
	return tevent_queue_add_internal(queue, ev, req,
					 trigger, private_data, false);
}

struct tevent_queue_entry *tevent_queue_add_optimize_empty(
					struct tevent_queue *queue,
					struct tevent_context *ev,
					struct tevent_req *req,
					tevent_queue_trigger_fn_t trigger,
					void *private_data)
{
	return tevent_queue_add_internal(queue, ev, req,
					 trigger, private_data, true);
}

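/*
 * Hedged sketch for the optimize-empty variant: when the queue is empty
 * and no callback has been set on the request yet, the trigger may run
 * directly from within this call (see tevent_queue_add_internal() above),
 * so the request callback should only be set after adding the entry.
 * example_add_optimized is a hypothetical name; example_trigger and
 * example_state refer to the sketch above.
 */
static bool example_add_optimized(struct tevent_queue *queue,
				  struct tevent_context *ev,
				  struct tevent_req *req,
				  struct example_state *state)
{
	struct tevent_queue_entry *qe;

	qe = tevent_queue_add_optimize_empty(queue, ev, req,
					     example_trigger, state);
	if (qe == NULL) {
		return false;
	}

	/* freeing qe (or req) later removes the entry from the queue */
	return true;
}
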
void tevent_queue_entry_untrigger(struct tevent_queue_entry *entry)
{
	if (entry->queue->running) {
		abort();
	}

	if (entry->queue->list != entry) {
		abort();
	}

	entry->triggered = false;
}

void tevent_queue_start(struct tevent_queue *queue)
{
	if (queue->running) {
		/* already started */
		return;
	}

	queue->running = true;

	if (!queue->list) {
		return;
	}

	if (queue->list->triggered) {
		return;
	}

	tevent_schedule_immediate(queue->immediate,
				  queue->list->ev,
				  tevent_queue_immediate_trigger,
				  queue);
}

void tevent_queue_stop(struct tevent_queue *queue)
{
	queue->running = false;
}

size_t tevent_queue_length(struct tevent_queue *queue)
{
	return queue->length;
}

bool tevent_queue_running(struct tevent_queue *queue)
{
	return queue->running;
}

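/*
 * Hedged sketch of pausing and resuming a queue with the helpers above;
 * example_pause_and_resume is a hypothetical name.
 */
static void example_pause_and_resume(struct tevent_queue *queue)
{
	tevent_queue_stop(queue);	/* pending entries stop being triggered */

	/* ... add or reorder work while the queue is paused ... */

	if (tevent_queue_length(queue) > 0) {
		tevent_queue_start(queue);	/* kicks the head entry again */
	}
}
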
struct tevent_queue_wait_state {
	uint8_t dummy;
};

static void tevent_queue_wait_trigger(struct tevent_req *req,
				      void *private_data);

struct tevent_req *tevent_queue_wait_send(TALLOC_CTX *mem_ctx,
					  struct tevent_context *ev,
					  struct tevent_queue *queue)
{
	struct tevent_req *req;
	struct tevent_queue_wait_state *state;
	bool ok;

	req = tevent_req_create(mem_ctx, &state,
				struct tevent_queue_wait_state);
	if (req == NULL) {
		return NULL;
	}

	ok = tevent_queue_add(queue, ev, req,
			      tevent_queue_wait_trigger,
			      NULL);
	if (!ok) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}

	return req;
}

static void tevent_queue_wait_trigger(struct tevent_req *req,
				      void *private_data)
{
	tevent_req_done(req);
}

bool tevent_queue_wait_recv(struct tevent_req *req)
{
	enum tevent_req_state state;
	uint64_t err;

	if (tevent_req_is_error(req, &state, &err)) {
		tevent_req_received(req);
		return false;
	}

	tevent_req_received(req);
	return true;
}

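/*
 * Hedged sketch of using the wait request as a barrier: its trigger only
 * fires once every entry added before it has left the queue (normally by
 * the earlier requests completing and being freed).  example_add_barrier
 * and example_barrier_done are hypothetical names.
 */
static void example_barrier_done(struct tevent_req *subreq)
{
	bool ok = tevent_queue_wait_recv(subreq);

	TALLOC_FREE(subreq);	/* frees the queue entry and unblocks the queue */
	if (!ok) {
		/* the wait request itself failed, e.g. out of memory */
		return;
	}

	/* everything queued before the barrier has now been processed */
}

static bool example_add_barrier(TALLOC_CTX *mem_ctx,
				struct tevent_context *ev,
				struct tevent_queue *queue)
{
	struct tevent_req *subreq;

	subreq = tevent_queue_wait_send(mem_ctx, ev, queue);
	if (subreq == NULL) {
		return false;
	}
	tevent_req_set_callback(subreq, example_barrier_done, NULL);

	return true;
}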