/*
   Unix SMB/CIFS implementation.
   Infrastructure for async requests
   Copyright (C) Volker Lendecke 2008
   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
struct tevent_queue_entry {
	struct tevent_queue_entry *prev, *next;
	struct tevent_queue *queue;

	bool triggered;

	struct tevent_req *req;
	struct tevent_context *ev;

	tevent_queue_trigger_fn_t trigger;
	void *private_data;
};

struct tevent_queue {
	const char *name;
	const char *location;

	bool running;
	struct tevent_immediate *immediate;

	size_t length;
	struct tevent_queue_entry *list;
};
static void tevent_queue_immediate_trigger(struct tevent_context *ev,
					   struct tevent_immediate *im,
					   void *private_data);
static int tevent_queue_entry_destructor(struct tevent_queue_entry *e)
{
	struct tevent_queue *q = e->queue;

	if (!q) {
		return 0;
	}

	DLIST_REMOVE(q->list, e);
	q->length--;

	if (!q->running) {
		return 0;
	}

	if (!q->list) {
		return 0;
	}

	if (q->list->triggered) {
		return 0;
	}

	tevent_schedule_immediate(q->immediate,
				  q->list->ev,
				  tevent_queue_immediate_trigger,
				  q);

	return 0;
}
static int tevent_queue_destructor(struct tevent_queue *q)
{
	q->running = false;

	while (q->list) {
		struct tevent_queue_entry *e = q->list;
		talloc_free(e);
	}

	return 0;
}
struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx,
					  const char *name,
					  const char *location)
{
	struct tevent_queue *queue;

	queue = talloc_zero(mem_ctx, struct tevent_queue);
	if (!queue) {
		return NULL;
	}

	queue->name = talloc_strdup(queue, name);
	if (!queue->name) {
		talloc_free(queue);
		return NULL;
	}
	queue->immediate = tevent_create_immediate(queue);
	if (!queue->immediate) {
		talloc_free(queue);
		return NULL;
	}

	queue->location = location;

	/* queue is running by default */
	queue->running = true;

	talloc_set_destructor(queue, tevent_queue_destructor);
	return queue;
}
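/*
 * Illustrative usage sketch (not part of the original file): callers
 * normally go through the tevent_queue_create() macro from tevent.h,
 * which fills in the location argument. "mem_ctx" below is assumed to
 * be an existing talloc context.
 *
 *	struct tevent_queue *q;
 *
 *	q = tevent_queue_create(mem_ctx, "example queue");
 *	if (q == NULL) {
 *		... handle out of memory ...
 *	}
 */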
static void tevent_queue_immediate_trigger(struct tevent_context *ev,
					   struct tevent_immediate *im,
					   void *private_data)
{
	struct tevent_queue *q =
		talloc_get_type_abort(private_data,
				      struct tevent_queue);

	if (!q->running) {
		return;
	}

	q->list->triggered = true;
	q->list->trigger(q->list->req, q->list->private_data);
}
static struct tevent_queue_entry *tevent_queue_add_internal(
					struct tevent_queue *queue,
					struct tevent_context *ev,
					struct tevent_req *req,
					tevent_queue_trigger_fn_t trigger,
					void *private_data,
					bool allow_direct)
{
	struct tevent_queue_entry *e;

	e = talloc_zero(req, struct tevent_queue_entry);
	if (e == NULL) {
		return NULL;
	}

	e->queue = queue;
	e->req = req;
	e->ev = ev;
	e->trigger = trigger;
	e->private_data = private_data;

	/*
	 * if there is no trigger, it is just a blocker
	 */
	if (trigger == NULL) {
		e->triggered = true;
	}

	if (queue->length > 0) {
		/*
		 * if there are already entries in the
		 * queue do not optimize.
		 */
		allow_direct = false;
	}

	if (req->async.fn != NULL) {
		/*
		 * If the caller wants to optimize for the
		 * empty queue case, call the trigger only
		 * if there is no callback defined for the
		 * request yet.
		 */
		allow_direct = false;
	}

	DLIST_ADD_END(queue->list, e, struct tevent_queue_entry *);
	queue->length++;
	talloc_set_destructor(e, tevent_queue_entry_destructor);

	if (!queue->running) {
		return e;
	}

	if (queue->list->triggered) {
		return e;
	}

	/*
	 * If allowed we directly call the trigger
	 * avoiding possible delays caused by
	 * an immediate event.
	 */
	if (allow_direct) {
		queue->list->triggered = true;
		queue->list->trigger(queue->list->req,
				     queue->list->private_data);
		return e;
	}

	tevent_schedule_immediate(queue->immediate,
				  queue->list->ev,
				  tevent_queue_immediate_trigger,
				  queue);

	return e;
}
bool tevent_queue_add(struct tevent_queue *queue,
		      struct tevent_context *ev,
		      struct tevent_req *req,
		      tevent_queue_trigger_fn_t trigger,
		      void *private_data)
{
	struct tevent_queue_entry *e;

	e = tevent_queue_add_internal(queue, ev, req,
				      trigger, private_data, false);
	if (e == NULL) {
		return false;
	}

	return true;
}
struct tevent_queue_entry *tevent_queue_add_entry(
					struct tevent_queue *queue,
					struct tevent_context *ev,
					struct tevent_req *req,
					tevent_queue_trigger_fn_t trigger,
					void *private_data)
{
	return tevent_queue_add_internal(queue, ev, req,
					 trigger, private_data, false);
}
struct tevent_queue_entry *tevent_queue_add_optimize_empty(
					struct tevent_queue *queue,
					struct tevent_context *ev,
					struct tevent_req *req,
					tevent_queue_trigger_fn_t trigger,
					void *private_data)
{
	return tevent_queue_add_internal(queue, ev, req,
					 trigger, private_data, true);
}
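/*
 * Illustrative usage sketch (not part of the original file): a caller
 * can serialize asynchronous operations by adding each request to a
 * queue and starting the real work from the trigger callback.
 * "my_op_trigger" and "my_state" are hypothetical names used only for
 * this example.
 *
 *	static void my_op_trigger(struct tevent_req *req,
 *				  void *private_data)
 *	{
 *		... req has reached the head of the queue,
 *		    start the actual I/O here ...
 *	}
 *
 *	ok = tevent_queue_add(queue, ev, req, my_op_trigger, my_state);
 *	if (!ok) {
 *		... handle out of memory ...
 *	}
 */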
void tevent_queue_start(struct tevent_queue *queue)
{
	if (queue->running) {
		/* already started */
		return;
	}

	queue->running = true;

	if (!queue->list) {
		return;
	}

	if (queue->list->triggered) {
		return;
	}

	tevent_schedule_immediate(queue->immediate,
				  queue->list->ev,
				  tevent_queue_immediate_trigger,
				  queue);
}
void tevent_queue_stop(struct tevent_queue *queue)
{
	queue->running = false;
}
size_t tevent_queue_length(struct tevent_queue *queue)
{
	return queue->length;
}
bool tevent_queue_running(struct tevent_queue *queue)
{
	return queue->running;
}
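/*
 * Illustrative usage sketch (not part of the original file): a queue
 * can be stopped while a batch of requests is collected and started
 * again afterwards; pending entries are only triggered once the queue
 * is running again.
 *
 *	tevent_queue_stop(queue);
 *	... add several requests with tevent_queue_add() ...
 *	tevent_queue_start(queue);
 */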
struct tevent_queue_wait_state {
	uint8_t dummy;
};
static void tevent_queue_wait_trigger(struct tevent_req *req,
				      void *private_data);
struct tevent_req *tevent_queue_wait_send(TALLOC_CTX *mem_ctx,
					  struct tevent_context *ev,
					  struct tevent_queue *queue)
{
	struct tevent_req *req;
	struct tevent_queue_wait_state *state;
	bool ok;

	req = tevent_req_create(mem_ctx, &state,
				struct tevent_queue_wait_state);
	if (req == NULL) {
		return NULL;
	}

	ok = tevent_queue_add(queue, ev, req,
			      tevent_queue_wait_trigger,
			      NULL);
	if (!ok) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}

	return req;
}
static void tevent_queue_wait_trigger(struct tevent_req *req,
				      void *private_data)
{
	tevent_req_done(req);
}
bool tevent_queue_wait_recv(struct tevent_req *req)
{
	enum tevent_req_state state;
	uint64_t err;

	if (tevent_req_is_error(req, &state, &err)) {
		tevent_req_received(req);
		return false;
	}

	tevent_req_received(req);
	return true;
}