/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
30 struct tevent_queue_entry
{
31 struct tevent_queue_entry
*prev
, *next
;
32 struct tevent_queue
*queue
;
36 struct tevent_req
*req
;
37 struct tevent_context
*ev
;
39 tevent_queue_trigger_fn_t trigger
;
/*
 * A queue serializing async requests.
 *
 * The struct header and most members were lost in extraction; they are
 * restored here because the rest of this file reads/writes `name`,
 * `location`, `running` and `length`.
 */
struct tevent_queue {
	const char *name;		/* talloc'ed copy of the queue name */
	const char *location;		/* source location of the creator */

	bool running;			/* false while stopped via _stop() */
	struct tevent_immediate *immediate; /* used to defer trigger calls */

	size_t length;			/* number of queued entries */
	struct tevent_queue_entry *list; /* head = next entry to trigger */
};
/* Forward declaration: fires the head entry's trigger from an immediate
 * event (needed before the destructors below can schedule it). */
static void tevent_queue_immediate_trigger(struct tevent_context *ev,
					   struct tevent_immediate *im,
					   void *private_data);
59 static int tevent_queue_entry_destructor(struct tevent_queue_entry
*e
)
61 struct tevent_queue
*q
= e
->queue
;
67 tevent_trace_queue_callback(q
->list
->ev
, e
, TEVENT_EVENT_TRACE_DETACH
);
68 DLIST_REMOVE(q
->list
, e
);
79 if (q
->list
->triggered
) {
83 tevent_schedule_immediate(q
->immediate
,
85 tevent_queue_immediate_trigger
,
91 static int tevent_queue_destructor(struct tevent_queue
*q
)
96 struct tevent_queue_entry
*e
= q
->list
;
103 struct tevent_queue
*_tevent_queue_create(TALLOC_CTX
*mem_ctx
,
105 const char *location
)
107 struct tevent_queue
*queue
;
109 queue
= talloc_zero(mem_ctx
, struct tevent_queue
);
114 queue
->name
= talloc_strdup(queue
, name
);
119 queue
->immediate
= tevent_create_immediate(queue
);
120 if (!queue
->immediate
) {
125 queue
->location
= location
;
127 /* queue is running by default */
128 queue
->running
= true;
130 talloc_set_destructor(queue
, tevent_queue_destructor
);
134 static void tevent_queue_immediate_trigger(struct tevent_context
*ev
,
135 struct tevent_immediate
*im
,
138 struct tevent_queue
*q
=
139 talloc_get_type_abort(private_data
,
140 struct tevent_queue
);
150 tevent_trace_queue_callback(ev
, q
->list
,
151 TEVENT_EVENT_TRACE_BEFORE_HANDLER
);
152 q
->list
->triggered
= true;
153 q
->list
->trigger(q
->list
->req
, q
->list
->private_data
);
/* Default trigger used when the caller passes trigger == NULL. */
static void tevent_queue_noop_trigger(struct tevent_req *req,
				      void *private_data)
{
	/* this is doing nothing but blocking the queue */
}
162 static struct tevent_queue_entry
*tevent_queue_add_internal(
163 struct tevent_queue
*queue
,
164 struct tevent_context
*ev
,
165 struct tevent_req
*req
,
166 tevent_queue_trigger_fn_t trigger
,
170 struct tevent_queue_entry
*e
;
172 e
= talloc_zero(req
, struct tevent_queue_entry
);
178 * if there is no trigger, it is just a blocker
180 if (trigger
== NULL
) {
181 trigger
= tevent_queue_noop_trigger
;
187 e
->trigger
= trigger
;
188 e
->private_data
= private_data
;
190 if (queue
->length
> 0) {
192 * if there are already entries in the
193 * queue do not optimize.
195 allow_direct
= false;
198 if (req
->async
.fn
!= NULL
) {
200 * If the caller wants to optimize for the
201 * empty queue case, call the trigger only
202 * if there is no callback defined for the
205 allow_direct
= false;
208 DLIST_ADD_END(queue
->list
, e
);
210 talloc_set_destructor(e
, tevent_queue_entry_destructor
);
211 tevent_trace_queue_callback(ev
, e
, TEVENT_EVENT_TRACE_ATTACH
);
213 if (!queue
->running
) {
217 if (queue
->list
->triggered
) {
222 * If allowed we directly call the trigger
223 * avoiding possible delays caused by
224 * an immediate event.
227 tevent_trace_queue_callback(ev
,
229 TEVENT_EVENT_TRACE_BEFORE_HANDLER
);
230 queue
->list
->triggered
= true;
231 queue
->list
->trigger(queue
->list
->req
,
232 queue
->list
->private_data
);
236 tevent_schedule_immediate(queue
->immediate
,
238 tevent_queue_immediate_trigger
,
244 bool tevent_queue_add(struct tevent_queue
*queue
,
245 struct tevent_context
*ev
,
246 struct tevent_req
*req
,
247 tevent_queue_trigger_fn_t trigger
,
250 struct tevent_queue_entry
*e
;
252 e
= tevent_queue_add_internal(queue
, ev
, req
,
253 trigger
, private_data
, false);
261 struct tevent_queue_entry
*tevent_queue_add_entry(
262 struct tevent_queue
*queue
,
263 struct tevent_context
*ev
,
264 struct tevent_req
*req
,
265 tevent_queue_trigger_fn_t trigger
,
268 return tevent_queue_add_internal(queue
, ev
, req
,
269 trigger
, private_data
, false);
272 struct tevent_queue_entry
*tevent_queue_add_optimize_empty(
273 struct tevent_queue
*queue
,
274 struct tevent_context
*ev
,
275 struct tevent_req
*req
,
276 tevent_queue_trigger_fn_t trigger
,
279 return tevent_queue_add_internal(queue
, ev
, req
,
280 trigger
, private_data
, true);
283 void tevent_queue_entry_untrigger(struct tevent_queue_entry
*entry
)
285 if (entry
->queue
->running
) {
289 if (entry
->queue
->list
!= entry
) {
293 entry
->triggered
= false;
296 void tevent_queue_start(struct tevent_queue
*queue
)
298 if (queue
->running
) {
299 /* already started */
303 queue
->running
= true;
309 if (queue
->list
->triggered
) {
313 tevent_schedule_immediate(queue
->immediate
,
315 tevent_queue_immediate_trigger
,
319 void tevent_queue_stop(struct tevent_queue
*queue
)
321 queue
->running
= false;
324 size_t tevent_queue_length(struct tevent_queue
*queue
)
326 return queue
->length
;
329 bool tevent_queue_running(struct tevent_queue
*queue
)
331 return queue
->running
;
/* State for tevent_queue_wait_send/_recv; it carries no data, the
 * request merely waits until it reaches the head of the queue. */
struct tevent_queue_wait_state {
	uint8_t dummy;
};
/* Forward declaration: completes the wait request once it reaches the
 * head of the queue. */
static void tevent_queue_wait_trigger(struct tevent_req *req,
				      void *private_data);
341 struct tevent_req
*tevent_queue_wait_send(TALLOC_CTX
*mem_ctx
,
342 struct tevent_context
*ev
,
343 struct tevent_queue
*queue
)
345 struct tevent_req
*req
;
346 struct tevent_queue_wait_state
*state
;
349 req
= tevent_req_create(mem_ctx
, &state
,
350 struct tevent_queue_wait_state
);
355 ok
= tevent_queue_add(queue
, ev
, req
,
356 tevent_queue_wait_trigger
,
360 return tevent_req_post(req
, ev
);
/* Trigger for wait requests: reaching the head of the queue is the
 * whole job, so simply mark the request done. */
static void tevent_queue_wait_trigger(struct tevent_req *req,
				      void *private_data)
{
	tevent_req_done(req);
}
372 bool tevent_queue_wait_recv(struct tevent_req
*req
)
374 enum tevent_req_state state
;
377 if (tevent_req_is_error(req
, &state
, &err
)) {
378 tevent_req_received(req
);
382 tevent_req_received(req
);
386 void tevent_queue_entry_set_tag(struct tevent_queue_entry
*qe
, uint64_t tag
)
395 uint64_t tevent_queue_entry_get_tag(const struct tevent_queue_entry
*qe
)