2 Unix SMB/CIFS implementation.
3 main select loop and event handling
4 Copyright (C) Andrew Tridgell 2003-2005
5 Copyright (C) Stefan Metzmacher 2005-2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "system/filesys.h"
27 #include "system/select.h"
29 #include "tevent_util.h"
30 #include "tevent_internal.h"
/*
 * Per-tevent-context private state for the poll backend.
 * Stored in ev->additional_data; freed/reinitialised by
 * tevent_re_initialise() via poll_event_context_init().
 */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;

	/*
	 * These two arrays are maintained together: fds[i] is the
	 * pollfd handed to poll(2) for the tevent_fd in fdes[i].
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread (the write end of the
	 * wakeup pipe in the "poll_mt" variant; -1 when unused)
	 */
	int signal_fd;

	/* information for exiting from the event loop */
	int exit_code;
};
58 static int poll_event_context_destructor(struct poll_event_context
*poll_ev
)
60 struct tevent_fd
*fd
, *fn
;
62 for (fd
= poll_ev
->fresh
; fd
; fd
= fn
) {
65 DLIST_REMOVE(poll_ev
->fresh
, fd
);
68 if (poll_ev
->signal_fd
== -1) {
70 * Non-threaded, no signal pipe
75 close(poll_ev
->signal_fd
);
76 poll_ev
->signal_fd
= -1;
78 if (poll_ev
->num_fds
== 0) {
81 if (poll_ev
->fds
[0].fd
!= -1) {
82 close(poll_ev
->fds
[0].fd
);
83 poll_ev
->fds
[0].fd
= -1;
89 create a poll_event_context structure.
91 static int poll_event_context_init(struct tevent_context
*ev
)
93 struct poll_event_context
*poll_ev
;
96 * we might be called during tevent_re_initialise()
97 * which means we need to free our old additional_data
98 * in order to detach old fd events from the
101 TALLOC_FREE(ev
->additional_data
);
103 poll_ev
= talloc_zero(ev
, struct poll_event_context
);
104 if (poll_ev
== NULL
) {
108 poll_ev
->signal_fd
= -1;
109 ev
->additional_data
= poll_ev
;
110 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
/*
 * Put fd into non-blocking mode. Returns false if either fcntl fails.
 */
static bool set_nonblock(int fd)
{
	int val;

	val = fcntl(fd, F_GETFL, 0);
	if (val == -1) {
		return false;
	}
	val |= O_NONBLOCK;

	return (fcntl(fd, F_SETFL, val) != -1);
}
127 static int poll_event_context_init_mt(struct tevent_context
*ev
)
129 struct poll_event_context
*poll_ev
;
134 ret
= poll_event_context_init(ev
);
139 poll_ev
= talloc_get_type_abort(
140 ev
->additional_data
, struct poll_event_context
);
142 poll_ev
->fds
= talloc_zero(poll_ev
, struct pollfd
);
143 if (poll_ev
->fds
== NULL
) {
152 if (!set_nonblock(fds
[0]) || !set_nonblock(fds
[1])) {
158 poll_ev
->signal_fd
= fds
[1];
160 pfd
= &poll_ev
->fds
[0];
162 pfd
->events
= (POLLIN
|POLLHUP
);
164 poll_ev
->num_fds
= 1;
166 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
171 static void poll_event_wake_pollthread(struct poll_event_context
*poll_ev
)
176 if (poll_ev
->signal_fd
== -1) {
181 ret
= write(poll_ev
->signal_fd
, &c
, sizeof(c
));
182 } while ((ret
== -1) && (errno
== EINTR
));
185 static void poll_event_drain_signal_fd(struct poll_event_context
*poll_ev
)
191 if (poll_ev
->signal_fd
== -1) {
195 if (poll_ev
->num_fds
< 1) {
198 fd
= poll_ev
->fds
[0].fd
;
201 ret
= read(fd
, buf
, sizeof(buf
));
202 } while (ret
== sizeof(buf
));
208 static int poll_event_fd_destructor(struct tevent_fd
*fde
)
210 struct tevent_context
*ev
= fde
->event_ctx
;
211 struct poll_event_context
*poll_ev
;
212 uint64_t del_idx
= fde
->additional_flags
;
218 poll_ev
= talloc_get_type_abort(
219 ev
->additional_data
, struct poll_event_context
);
221 poll_ev
->fdes
[del_idx
] = NULL
;
222 poll_event_wake_pollthread(poll_ev
);
224 return tevent_common_fd_destructor(fde
);
227 static int poll_fresh_fde_destructor(struct tevent_fd
*fde
)
229 struct tevent_context
*ev
= fde
->event_ctx
;
230 struct poll_event_context
*poll_ev
;
235 poll_ev
= talloc_get_type_abort(
236 ev
->additional_data
, struct poll_event_context
);
238 DLIST_REMOVE(poll_ev
->fresh
, fde
);
240 return tevent_common_fd_destructor(fde
);
243 static void poll_event_schedule_immediate(struct tevent_immediate
*im
,
244 struct tevent_context
*ev
,
245 tevent_immediate_handler_t handler
,
247 const char *handler_name
,
248 const char *location
)
250 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
251 ev
->additional_data
, struct poll_event_context
);
253 tevent_common_schedule_immediate(im
, ev
, handler
, private_data
,
254 handler_name
, location
);
255 poll_event_wake_pollthread(poll_ev
);
259 Private function called by "standard" backend fallback.
260 Note this only allows fallback to "poll" backend, not "poll-mt".
262 _PRIVATE_
void tevent_poll_event_add_fd_internal(struct tevent_context
*ev
,
263 struct tevent_fd
*fde
)
265 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
266 ev
->additional_data
, struct poll_event_context
);
268 fde
->additional_flags
= UINT64_MAX
;
269 fde
->additional_data
= NULL
;
270 DLIST_ADD(poll_ev
->fresh
, fde
);
271 talloc_set_destructor(fde
, poll_fresh_fde_destructor
);
276 return NULL on failure (memory allocation error)
278 static struct tevent_fd
*poll_event_add_fd(struct tevent_context
*ev
,
280 int fd
, uint16_t flags
,
281 tevent_fd_handler_t handler
,
283 const char *handler_name
,
284 const char *location
)
286 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
287 ev
->additional_data
, struct poll_event_context
);
288 struct tevent_fd
*fde
;
294 fde
= talloc(mem_ctx
? mem_ctx
: ev
, struct tevent_fd
);
301 fde
->handler
= handler
;
302 fde
->close_fn
= NULL
;
303 fde
->private_data
= private_data
;
304 fde
->handler_name
= handler_name
;
305 fde
->location
= location
;
306 fde
->additional_flags
= UINT64_MAX
;
307 fde
->additional_data
= NULL
;
309 DLIST_ADD(poll_ev
->fresh
, fde
);
310 talloc_set_destructor(fde
, poll_fresh_fde_destructor
);
311 poll_event_wake_pollthread(poll_ev
);
314 * poll_event_loop_poll will take care of the rest in
315 * poll_event_setup_fresh
321 set the fd event flags
323 static void poll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
325 struct tevent_context
*ev
= fde
->event_ctx
;
326 struct poll_event_context
*poll_ev
;
327 uint64_t idx
= fde
->additional_flags
;
333 poll_ev
= talloc_get_type_abort(
334 ev
->additional_data
, struct poll_event_context
);
338 if (idx
== UINT64_MAX
) {
340 * poll_event_setup_fresh not yet called after this fde was
341 * added. We don't have to do anything to transfer the changed
342 * flags to the array passed to poll(2)
349 if (flags
& TEVENT_FD_READ
) {
350 pollflags
|= (POLLIN
|POLLHUP
);
352 if (flags
& TEVENT_FD_WRITE
) {
353 pollflags
|= (POLLOUT
);
355 poll_ev
->fds
[idx
].events
= pollflags
;
357 poll_event_wake_pollthread(poll_ev
);
360 static bool poll_event_setup_fresh(struct tevent_context
*ev
,
361 struct poll_event_context
*poll_ev
)
363 struct tevent_fd
*fde
, *next
;
364 unsigned num_fresh
, num_fds
;
366 if (poll_ev
->fresh
== NULL
) {
371 for (fde
= poll_ev
->fresh
; fde
; fde
= fde
->next
) {
374 num_fds
= poll_ev
->num_fds
+ num_fresh
;
377 * We check the length of fdes here. It is the last one
378 * enlarged, so if the realloc for poll_fd->fdes fails,
379 * poll_fd->fds will have at least the size of poll_fd->fdes
382 if (num_fds
>= talloc_array_length(poll_ev
->fdes
)) {
383 struct pollfd
*tmp_fds
;
384 struct tevent_fd
**tmp_fdes
;
385 unsigned array_length
;
387 array_length
= (num_fds
+ 15) & ~15; /* round up to 16 */
389 tmp_fds
= talloc_realloc(
390 poll_ev
, poll_ev
->fds
, struct pollfd
, array_length
);
391 if (tmp_fds
== NULL
) {
394 poll_ev
->fds
= tmp_fds
;
396 tmp_fdes
= talloc_realloc(
397 poll_ev
, poll_ev
->fdes
, struct tevent_fd
*,
399 if (tmp_fdes
== NULL
) {
402 poll_ev
->fdes
= tmp_fdes
;
405 for (fde
= poll_ev
->fresh
; fde
; fde
= next
) {
408 pfd
= &poll_ev
->fds
[poll_ev
->num_fds
];
414 if (fde
->flags
& TEVENT_FD_READ
) {
415 pfd
->events
|= (POLLIN
|POLLHUP
);
417 if (fde
->flags
& TEVENT_FD_WRITE
) {
418 pfd
->events
|= (POLLOUT
);
421 fde
->additional_flags
= poll_ev
->num_fds
;
422 poll_ev
->fdes
[poll_ev
->num_fds
] = fde
;
425 DLIST_REMOVE(poll_ev
->fresh
, fde
);
426 DLIST_ADD(ev
->fd_events
, fde
);
428 talloc_set_destructor(fde
, poll_event_fd_destructor
);
430 poll_ev
->num_fds
+= 1;
436 event loop handling using poll()
438 static int poll_event_loop_poll(struct tevent_context
*ev
,
439 struct timeval
*tvalp
)
441 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
442 ev
->additional_data
, struct poll_event_context
);
449 if (ev
->signal_events
&& tevent_common_check_signal(ev
)) {
454 timeout
= tvalp
->tv_sec
* 1000;
455 timeout
+= (tvalp
->tv_usec
+ 999) / 1000;
458 poll_event_drain_signal_fd(poll_ev
);
460 if (!poll_event_setup_fresh(ev
, poll_ev
)) {
464 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
465 pollrtn
= poll(poll_ev
->fds
, poll_ev
->num_fds
, timeout
);
467 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
469 if (pollrtn
== -1 && poll_errno
== EINTR
&& ev
->signal_events
) {
470 tevent_common_check_signal(ev
);
474 if (pollrtn
== 0 && tvalp
) {
475 /* we don't care about a possible delay here */
476 tevent_common_loop_timer_delay(ev
);
487 first_fd
= (poll_ev
->signal_fd
!= -1) ? 1 : 0;
489 /* at least one file descriptor is ready - check
490 which ones and call the handler, being careful to allow
491 the handler to remove itself when called */
493 for (i
=first_fd
; i
<poll_ev
->num_fds
; i
= next_i
) {
495 struct tevent_fd
*fde
;
500 fde
= poll_ev
->fdes
[i
];
503 * This fde was talloc_free()'ed. Delete it
506 poll_ev
->num_fds
-= 1;
507 if (poll_ev
->num_fds
== i
) {
510 poll_ev
->fds
[i
] = poll_ev
->fds
[poll_ev
->num_fds
];
511 poll_ev
->fdes
[i
] = poll_ev
->fdes
[poll_ev
->num_fds
];
512 if (poll_ev
->fdes
[i
] != NULL
) {
513 poll_ev
->fdes
[i
]->additional_flags
= i
;
515 /* we have to reprocess position 'i' */
520 pfd
= &poll_ev
->fds
[i
];
522 if (pfd
->revents
& (POLLHUP
|POLLERR
)) {
523 /* If we only wait for TEVENT_FD_WRITE, we
524 should not tell the event handler about it,
525 and remove the writable flag, as we only
526 report errors when waiting for read events
527 to match the select behavior. */
528 if (!(fde
->flags
& TEVENT_FD_READ
)) {
529 TEVENT_FD_NOT_WRITEABLE(fde
);
532 flags
|= TEVENT_FD_READ
;
534 if (pfd
->revents
& POLLIN
) {
535 flags
|= TEVENT_FD_READ
;
537 if (pfd
->revents
& POLLOUT
) {
538 flags
|= TEVENT_FD_WRITE
;
541 fde
->handler(ev
, fde
, flags
, fde
->private_data
);
550 do a single event loop using the events defined in ev
552 static int poll_event_loop_once(struct tevent_context
*ev
,
553 const char *location
)
557 if (ev
->signal_events
&&
558 tevent_common_check_signal(ev
)) {
562 if (ev
->immediate_events
&&
563 tevent_common_loop_immediate(ev
)) {
567 tval
= tevent_common_loop_timer_delay(ev
);
568 if (tevent_timeval_is_zero(&tval
)) {
572 return poll_event_loop_poll(ev
, &tval
);
575 static int poll_event_loop_wait(struct tevent_context
*ev
,
576 const char *location
)
578 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
579 ev
->additional_data
, struct poll_event_context
);
582 * loop as long as we have events pending
584 while (ev
->fd_events
||
586 ev
->immediate_events
||
590 ret
= _tevent_loop_once(ev
, location
);
592 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
593 "_tevent_loop_once() failed: %d - %s\n",
594 ret
, strerror(errno
));
599 tevent_debug(ev
, TEVENT_DEBUG_WARNING
,
600 "poll_event_loop_wait() out of events\n");
604 static const struct tevent_ops poll_event_ops
= {
605 .context_init
= poll_event_context_init
,
606 .add_fd
= poll_event_add_fd
,
607 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
608 .get_fd_flags
= tevent_common_fd_get_flags
,
609 .set_fd_flags
= poll_event_set_fd_flags
,
610 .add_timer
= tevent_common_add_timer
,
611 .schedule_immediate
= tevent_common_schedule_immediate
,
612 .add_signal
= tevent_common_add_signal
,
613 .loop_once
= poll_event_loop_once
,
614 .loop_wait
= poll_event_loop_wait
,
617 _PRIVATE_
bool tevent_poll_init(void)
619 return tevent_register_backend("poll", &poll_event_ops
);
622 static const struct tevent_ops poll_event_mt_ops
= {
623 .context_init
= poll_event_context_init_mt
,
624 .add_fd
= poll_event_add_fd
,
625 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
626 .get_fd_flags
= tevent_common_fd_get_flags
,
627 .set_fd_flags
= poll_event_set_fd_flags
,
628 .add_timer
= tevent_common_add_timer
,
629 .schedule_immediate
= poll_event_schedule_immediate
,
630 .add_signal
= tevent_common_add_signal
,
631 .loop_once
= poll_event_loop_once
,
632 .loop_wait
= poll_event_loop_wait
,
635 _PRIVATE_
bool tevent_poll_mt_init(void)
637 return tevent_register_backend("poll_mt", &poll_event_mt_ops
);