/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"

#undef tevent_queue_add
#undef tevent_queue_add_entry
#undef tevent_queue_add_optimize_empty

struct tevent_queue_entry {
        struct tevent_queue_entry *prev, *next;
        struct tevent_queue *queue;

        bool triggered;

        struct tevent_req *req;
        struct tevent_context *ev;

        tevent_queue_trigger_fn_t trigger;
        const char *trigger_name;
        void *private_data;
        uint64_t tag;
};

struct tevent_queue {
        const char *name;
        const char *location;

        bool running;
        struct tevent_immediate *immediate;

        size_t length;
        struct tevent_queue_entry *list;
};

static void tevent_queue_immediate_trigger(struct tevent_context *ev,
                                           struct tevent_immediate *im,
                                           void *private_data);

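/*
 * Talloc destructor for a queue entry: detach it from its queue and,
 * if the queue is still running and the new head entry has not been
 * triggered yet, schedule the next trigger via an immediate event.
 */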
static int tevent_queue_entry_destructor(struct tevent_queue_entry *e)
{
        struct tevent_queue *q = e->queue;

        if (!q) {
                return 0;
        }

        tevent_trace_queue_callback(q->list->ev, e, TEVENT_EVENT_TRACE_DETACH);
        tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_QUEUE_LEAVE,
                                        q->list->req,
                                        q->list->req->internal.call_depth,
                                        e->trigger_name);
        DLIST_REMOVE(q->list, e);
        q->length--;

        if (!q->running) {
                return 0;
        }

        if (!q->list) {
                return 0;
        }

        if (q->list->triggered) {
                return 0;
        }

        tevent_schedule_immediate(q->immediate,
                                  q->list->ev,
                                  tevent_queue_immediate_trigger,
                                  q);

        return 0;
}

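/*
 * Talloc destructor for the queue itself: mark it as stopped and free
 * any entries that are still pending.
 */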
static int tevent_queue_destructor(struct tevent_queue *q)
{
        q->running = false;

        while (q->list) {
                struct tevent_queue_entry *e = q->list;
                talloc_free(e);
        }

        return 0;
}

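/*
 * Create a queue that starts in the running state.  Called via the
 * tevent_queue_create() macro, which supplies the caller's location
 * string.
 */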
struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx,
                                          const char *name,
                                          const char *location)
{
        struct tevent_queue *queue;

        queue = talloc_zero(mem_ctx, struct tevent_queue);
        if (!queue) {
                return NULL;
        }

        queue->name = talloc_strdup(queue, name);
        if (!queue->name) {
                talloc_free(queue);
                return NULL;
        }
        queue->immediate = tevent_create_immediate(queue);
        if (!queue->immediate) {
                talloc_free(queue);
                return NULL;
        }

        queue->location = location;

        /* queue is running by default */
        queue->running = true;

        talloc_set_destructor(queue, tevent_queue_destructor);
        return queue;
}

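/*
 * Immediate handler that fires the trigger of the current queue head,
 * unless the queue has been stopped or emptied in the meantime.
 */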
static void tevent_queue_immediate_trigger(struct tevent_context *ev,
                                           struct tevent_immediate *im,
                                           void *private_data)
{
        struct tevent_queue *q =
                talloc_get_type_abort(private_data,
                                      struct tevent_queue);

        if (!q->running) {
                return;
        }

        if (!q->list) {
                return;
        }

        tevent_trace_queue_callback(ev, q->list,
                                    TEVENT_EVENT_TRACE_BEFORE_HANDLER);
        /* Set the call depth of the request coming from the queue. */
        tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_QUEUE_TRIGGER,
                                        q->list->req,
                                        q->list->req->internal.call_depth,
                                        q->list->trigger_name);
        q->list->triggered = true;
        q->list->trigger(q->list->req, q->list->private_data);
}

static void tevent_queue_noop_trigger(struct tevent_req *req,
                                      void *_private_data)
{
        /* this is doing nothing but blocking the queue */
}

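/*
 * Common worker for the tevent_queue_add*() variants.  With
 * allow_direct=true the trigger of a newly added entry may be called
 * directly when the queue was empty, is running and the request has
 * no callback set yet; otherwise the trigger is deferred to an
 * immediate event.
 */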
static struct tevent_queue_entry *tevent_queue_add_internal(
                                        struct tevent_queue *queue,
                                        struct tevent_context *ev,
                                        struct tevent_req *req,
                                        tevent_queue_trigger_fn_t trigger,
                                        const char *trigger_name,
                                        void *private_data,
                                        bool allow_direct)
{
        struct tevent_queue_entry *e;

        e = talloc_zero(req, struct tevent_queue_entry);
        if (e == NULL) {
                return NULL;
        }

        /*
         * if there is no trigger, it is just a blocker
         */
        if (trigger == NULL) {
                trigger = tevent_queue_noop_trigger;
        }

        e->queue = queue;
        e->req = req;
        e->ev = ev;
        e->trigger = trigger;
        e->trigger_name = trigger_name;
        e->private_data = private_data;

        if (queue->length > 0) {
                /*
                 * if there are already entries in the
                 * queue do not optimize.
                 */
                allow_direct = false;
        }

        if (req->async.fn != NULL) {
                /*
                 * If the caller wants to optimize for the
                 * empty queue case, call the trigger only
                 * if there is no callback defined for the
                 * request yet.
                 */
                allow_direct = false;
        }

        DLIST_ADD_END(queue->list, e);
        queue->length++;
        talloc_set_destructor(e, tevent_queue_entry_destructor);
        tevent_trace_queue_callback(ev, e, TEVENT_EVENT_TRACE_ATTACH);
        tevent_thread_call_depth_notify(TEVENT_CALL_FLOW_REQ_QUEUE_ENTER,
                                        req,
                                        req->internal.call_depth,
                                        e->trigger_name);

        if (!queue->running) {
                return e;
        }

        if (queue->list->triggered) {
                return e;
        }

        /*
         * If allowed we directly call the trigger
         * avoiding possible delays caused by
         * an immediate event.
         */
        if (allow_direct) {
                tevent_trace_queue_callback(ev,
                                            queue->list,
                                            TEVENT_EVENT_TRACE_BEFORE_HANDLER);
                queue->list->triggered = true;
                queue->list->trigger(queue->list->req,
                                     queue->list->private_data);
                return e;
        }

        tevent_schedule_immediate(queue->immediate,
                                  queue->list->ev,
                                  tevent_queue_immediate_trigger,
                                  queue);

        return e;
}

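/*
 * Public wrappers around tevent_queue_add_internal().
 * tevent_queue_add() only reports success or failure, the *_entry()
 * variants also return the entry so the caller can remove it from the
 * queue again by freeing it, and the *_optimize_empty() variants
 * additionally allow the trigger to be invoked directly on an empty
 * queue.
 */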
bool tevent_queue_add(struct tevent_queue *queue,
                      struct tevent_context *ev,
                      struct tevent_req *req,
                      tevent_queue_trigger_fn_t trigger,
                      void *private_data)
{
        return _tevent_queue_add(queue, ev, req, trigger, NULL, private_data);
}

bool _tevent_queue_add(struct tevent_queue *queue,
                       struct tevent_context *ev,
                       struct tevent_req *req,
                       tevent_queue_trigger_fn_t trigger,
                       const char *trigger_name,
                       void *private_data)
{
        struct tevent_queue_entry *e;

        e = tevent_queue_add_internal(queue, ev, req,
                                      trigger, trigger_name,
                                      private_data, false);
        if (e == NULL) {
                return false;
        }

        return true;
}

struct tevent_queue_entry *tevent_queue_add_entry(
                                        struct tevent_queue *queue,
                                        struct tevent_context *ev,
                                        struct tevent_req *req,
                                        tevent_queue_trigger_fn_t trigger,
                                        void *private_data)
{
        return _tevent_queue_add_entry(queue, ev, req,
                                       trigger, NULL,
                                       private_data);
}

struct tevent_queue_entry *_tevent_queue_add_entry(
                                        struct tevent_queue *queue,
                                        struct tevent_context *ev,
                                        struct tevent_req *req,
                                        tevent_queue_trigger_fn_t trigger,
                                        const char *trigger_name,
                                        void *private_data)
{
        return tevent_queue_add_internal(queue, ev, req,
                                         trigger, trigger_name,
                                         private_data, false);
}

struct tevent_queue_entry *tevent_queue_add_optimize_empty(
                                        struct tevent_queue *queue,
                                        struct tevent_context *ev,
                                        struct tevent_req *req,
                                        tevent_queue_trigger_fn_t trigger,
                                        void *private_data)
{
        return _tevent_queue_add_optimize_empty(queue, ev, req,
                                                trigger, NULL,
                                                private_data);
}

struct tevent_queue_entry *_tevent_queue_add_optimize_empty(
                                        struct tevent_queue *queue,
                                        struct tevent_context *ev,
                                        struct tevent_req *req,
                                        tevent_queue_trigger_fn_t trigger,
                                        const char *trigger_name,
                                        void *private_data)
{
        return tevent_queue_add_internal(queue, ev, req,
                                         trigger, trigger_name,
                                         private_data, true);
}

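/*
 * Forget that the head entry was already triggered so that restarting
 * the queue runs its trigger again; only valid while the queue is
 * stopped and only for the current list head.
 */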
void tevent_queue_entry_untrigger(struct tevent_queue_entry *entry)
{
        if (entry->queue->running) {
                abort();
        }

        if (entry->queue->list != entry) {
                abort();
        }

        entry->triggered = false;
}

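/*
 * Restart a stopped queue; if the head entry has not been triggered
 * yet, schedule its trigger via an immediate event.
 */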
void tevent_queue_start(struct tevent_queue *queue)
{
        if (queue->running) {
                /* already started */
                return;
        }

        queue->running = true;

        if (!queue->list) {
                return;
        }

        if (queue->list->triggered) {
                return;
        }

        tevent_schedule_immediate(queue->immediate,
                                  queue->list->ev,
                                  tevent_queue_immediate_trigger,
                                  queue);
}

void tevent_queue_stop(struct tevent_queue *queue)
{
        queue->running = false;
}

size_t tevent_queue_length(struct tevent_queue *queue)
{
        return queue->length;
}

bool tevent_queue_running(struct tevent_queue *queue)
{
        return queue->running;
}

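/*
 * tevent_queue_wait_send()/_recv() implement a tevent request that
 * completes once it reaches the head of the queue, i.e. once all
 * entries added before it have been triggered or removed.  This can
 * be used to wait for a queue to drain.
 */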
struct tevent_queue_wait_state {
        uint8_t dummy;
};

static void tevent_queue_wait_trigger(struct tevent_req *req,
                                      void *private_data);

struct tevent_req *tevent_queue_wait_send(TALLOC_CTX *mem_ctx,
                                          struct tevent_context *ev,
                                          struct tevent_queue *queue)
{
        struct tevent_req *req;
        struct tevent_queue_wait_state *state;
        bool ok;

        req = tevent_req_create(mem_ctx, &state,
                                struct tevent_queue_wait_state);
        if (req == NULL) {
                return NULL;
        }

        ok = _tevent_queue_add(queue, ev, req,
                               tevent_queue_wait_trigger,
                               "tevent_queue_wait_trigger",
                               NULL);
        if (!ok) {
                tevent_req_oom(req);
                return tevent_req_post(req, ev);
        }

        return req;
}

static void tevent_queue_wait_trigger(struct tevent_req *req,
                                      void *private_data)
{
        tevent_req_done(req);
}

bool tevent_queue_wait_recv(struct tevent_req *req)
{
        enum tevent_req_state state;
        uint64_t err;

        if (tevent_req_is_error(req, &state, &err)) {
                tevent_req_received(req);
                return false;
        }

        tevent_req_received(req);
        return true;
}

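/*
 * Getter and setter for an optional caller-supplied tag on a queue
 * entry.  The queue only stores the value, typically so that trace
 * callbacks can identify individual entries.
 */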
void tevent_queue_entry_set_tag(struct tevent_queue_entry *qe, uint64_t tag)
{
        if (qe == NULL) {
                return;
        }

        qe->tag = tag;
}

uint64_t tevent_queue_entry_get_tag(const struct tevent_queue_entry *qe)
{
        if (qe == NULL) {
                return 0;
        }

        return qe->tag;
}