2 Unix SMB/CIFS implementation.
3 main select loop and event handling
4 Copyright (C) Andrew Tridgell 2003-2005
5 Copyright (C) Stefan Metzmacher 2005-2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "system/filesys.h"
27 #include "system/select.h"
29 #include "tevent_util.h"
30 #include "tevent_internal.h"
/*
 * Per-tevent-context private state for the poll backend,
 * hung off ev->additional_data.
 */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;
	/*
	 * A DLIST for disabled fde's.
	 */
	struct tevent_fd *disabled;
	/*
	 * one or more events were deleted or disabled
	 */
	bool deleted;

	/*
	 * These two arrays are maintained together.
	 * fds[i] is the pollfd watched for fdes[i]; a NULL fdes[i]
	 * marks a slot whose fde was freed (compacted lazily in
	 * poll_event_setup_fresh).
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread (the write end of a
	 * pipe; -1 when running single-threaded).
	 */
	int signal_fd;
};
63 static int poll_event_context_destructor(struct poll_event_context
*poll_ev
)
65 struct tevent_fd
*fd
, *fn
;
67 for (fd
= poll_ev
->fresh
; fd
; fd
= fn
) {
70 DLIST_REMOVE(poll_ev
->fresh
, fd
);
73 for (fd
= poll_ev
->disabled
; fd
; fd
= fn
) {
76 DLIST_REMOVE(poll_ev
->disabled
, fd
);
79 if (poll_ev
->signal_fd
== -1) {
81 * Non-threaded, no signal pipe
86 close(poll_ev
->signal_fd
);
87 poll_ev
->signal_fd
= -1;
89 if (poll_ev
->num_fds
== 0) {
92 if (poll_ev
->fds
[0].fd
!= -1) {
93 close(poll_ev
->fds
[0].fd
);
94 poll_ev
->fds
[0].fd
= -1;
100 create a poll_event_context structure.
102 static int poll_event_context_init(struct tevent_context
*ev
)
104 struct poll_event_context
*poll_ev
;
107 * we might be called during tevent_re_initialise()
108 * which means we need to free our old additional_data
109 * in order to detach old fd events from the
110 * poll_ev->fresh list
112 TALLOC_FREE(ev
->additional_data
);
114 poll_ev
= talloc_zero(ev
, struct poll_event_context
);
115 if (poll_ev
== NULL
) {
119 poll_ev
->signal_fd
= -1;
120 ev
->additional_data
= poll_ev
;
121 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
/*
 * Put an fd into non-blocking mode.
 * Returns false if either fcntl() call fails.
 */
static bool set_nonblock(int fd)
{
	int val;

	val = fcntl(fd, F_GETFL, 0);
	if (val == -1) {
		return false;
	}
	val |= O_NONBLOCK;

	return (fcntl(fd, F_SETFL, val) != -1);
}
138 static int poll_event_context_init_mt(struct tevent_context
*ev
)
140 struct poll_event_context
*poll_ev
;
145 ret
= poll_event_context_init(ev
);
150 poll_ev
= talloc_get_type_abort(
151 ev
->additional_data
, struct poll_event_context
);
153 poll_ev
->fds
= talloc_zero(poll_ev
, struct pollfd
);
154 if (poll_ev
->fds
== NULL
) {
163 if (!set_nonblock(fds
[0]) || !set_nonblock(fds
[1])) {
169 poll_ev
->signal_fd
= fds
[1];
171 pfd
= &poll_ev
->fds
[0];
173 pfd
->events
= (POLLIN
|POLLHUP
);
175 poll_ev
->num_fds
= 1;
177 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
182 static void poll_event_wake_pollthread(struct poll_event_context
*poll_ev
)
187 if (poll_ev
->signal_fd
== -1) {
192 ret
= write(poll_ev
->signal_fd
, &c
, sizeof(c
));
193 } while ((ret
== -1) && (errno
== EINTR
));
196 static void poll_event_drain_signal_fd(struct poll_event_context
*poll_ev
)
202 if (poll_ev
->signal_fd
== -1) {
206 if (poll_ev
->num_fds
< 1) {
209 fd
= poll_ev
->fds
[0].fd
;
212 ret
= read(fd
, buf
, sizeof(buf
));
213 } while (ret
== sizeof(buf
));
219 static int poll_event_fd_destructor(struct tevent_fd
*fde
)
221 struct tevent_context
*ev
= fde
->event_ctx
;
222 struct poll_event_context
*poll_ev
;
223 uint64_t del_idx
= fde
->additional_flags
;
229 poll_ev
= talloc_get_type_abort(
230 ev
->additional_data
, struct poll_event_context
);
232 if (del_idx
== UINT64_MAX
) {
233 struct tevent_fd
**listp
=
234 (struct tevent_fd
**)fde
->additional_data
;
236 DLIST_REMOVE((*listp
), fde
);
240 poll_ev
->fdes
[del_idx
] = NULL
;
241 poll_ev
->deleted
= true;
242 poll_event_wake_pollthread(poll_ev
);
244 return tevent_common_fd_destructor(fde
);
247 static void poll_event_schedule_immediate(struct tevent_immediate
*im
,
248 struct tevent_context
*ev
,
249 tevent_immediate_handler_t handler
,
251 const char *handler_name
,
252 const char *location
)
254 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
255 ev
->additional_data
, struct poll_event_context
);
257 tevent_common_schedule_immediate(im
, ev
, handler
, private_data
,
258 handler_name
, location
);
259 poll_event_wake_pollthread(poll_ev
);
263 Private function called by "standard" backend fallback.
264 Note this only allows fallback to "poll" backend, not "poll-mt".
266 _PRIVATE_
void tevent_poll_event_add_fd_internal(struct tevent_context
*ev
,
267 struct tevent_fd
*fde
)
269 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
270 ev
->additional_data
, struct poll_event_context
);
271 struct tevent_fd
**listp
;
273 if (fde
->flags
!= 0) {
274 listp
= &poll_ev
->fresh
;
276 listp
= &poll_ev
->disabled
;
279 fde
->additional_flags
= UINT64_MAX
;
280 fde
->additional_data
= listp
;
282 DLIST_ADD((*listp
), fde
);
283 talloc_set_destructor(fde
, poll_event_fd_destructor
);
288 return NULL on failure (memory allocation error)
290 static struct tevent_fd
*poll_event_add_fd(struct tevent_context
*ev
,
292 int fd
, uint16_t flags
,
293 tevent_fd_handler_t handler
,
295 const char *handler_name
,
296 const char *location
)
298 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
299 ev
->additional_data
, struct poll_event_context
);
300 struct tevent_fd
*fde
;
306 fde
= talloc(mem_ctx
? mem_ctx
: ev
, struct tevent_fd
);
313 fde
->handler
= handler
;
314 fde
->close_fn
= NULL
;
315 fde
->private_data
= private_data
;
316 fde
->handler_name
= handler_name
;
317 fde
->location
= location
;
318 fde
->additional_flags
= UINT64_MAX
;
319 fde
->additional_data
= NULL
;
321 tevent_poll_event_add_fd_internal(ev
, fde
);
322 poll_event_wake_pollthread(poll_ev
);
325 * poll_event_loop_poll will take care of the rest in
326 * poll_event_setup_fresh
332 set the fd event flags
334 static void poll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
336 struct tevent_context
*ev
= fde
->event_ctx
;
337 struct poll_event_context
*poll_ev
;
338 uint64_t idx
= fde
->additional_flags
;
344 poll_ev
= talloc_get_type_abort(
345 ev
->additional_data
, struct poll_event_context
);
349 if (idx
== UINT64_MAX
) {
350 struct tevent_fd
**listp
=
351 (struct tevent_fd
**)fde
->additional_data
;
354 * We move it between the fresh and disabled lists.
356 DLIST_REMOVE((*listp
), fde
);
357 tevent_poll_event_add_fd_internal(ev
, fde
);
358 poll_event_wake_pollthread(poll_ev
);
362 if (fde
->flags
== 0) {
364 * We need to remove it from the array
365 * and move it to the disabled list.
367 poll_ev
->fdes
[idx
] = NULL
;
368 poll_ev
->deleted
= true;
369 DLIST_REMOVE(ev
->fd_events
, fde
);
370 tevent_poll_event_add_fd_internal(ev
, fde
);
371 poll_event_wake_pollthread(poll_ev
);
377 if (flags
& TEVENT_FD_READ
) {
378 pollflags
|= (POLLIN
|POLLHUP
);
380 if (flags
& TEVENT_FD_WRITE
) {
381 pollflags
|= (POLLOUT
);
383 poll_ev
->fds
[idx
].events
= pollflags
;
385 poll_event_wake_pollthread(poll_ev
);
388 static bool poll_event_setup_fresh(struct tevent_context
*ev
,
389 struct poll_event_context
*poll_ev
)
391 struct tevent_fd
*fde
, *next
;
392 unsigned num_fresh
, num_fds
;
394 if (poll_ev
->deleted
) {
395 unsigned first_fd
= (poll_ev
->signal_fd
!= -1) ? 1 : 0;
398 for (i
=first_fd
; i
< poll_ev
->num_fds
;) {
399 fde
= poll_ev
->fdes
[i
];
406 * This fde was talloc_free()'ed. Delete it
409 poll_ev
->num_fds
-= 1;
410 if (poll_ev
->num_fds
== i
) {
413 poll_ev
->fds
[i
] = poll_ev
->fds
[poll_ev
->num_fds
];
414 poll_ev
->fdes
[i
] = poll_ev
->fdes
[poll_ev
->num_fds
];
415 if (poll_ev
->fdes
[i
] != NULL
) {
416 poll_ev
->fdes
[i
]->additional_flags
= i
;
419 poll_ev
->deleted
= false;
422 if (poll_ev
->fresh
== NULL
) {
427 for (fde
= poll_ev
->fresh
; fde
; fde
= fde
->next
) {
430 num_fds
= poll_ev
->num_fds
+ num_fresh
;
433 * We check the length of fdes here. It is the last one
434 * enlarged, so if the realloc for poll_fd->fdes fails,
435 * poll_fd->fds will have at least the size of poll_fd->fdes
438 if (num_fds
>= talloc_array_length(poll_ev
->fdes
)) {
439 struct pollfd
*tmp_fds
;
440 struct tevent_fd
**tmp_fdes
;
441 unsigned array_length
;
443 array_length
= (num_fds
+ 15) & ~15; /* round up to 16 */
445 tmp_fds
= talloc_realloc(
446 poll_ev
, poll_ev
->fds
, struct pollfd
, array_length
);
447 if (tmp_fds
== NULL
) {
450 poll_ev
->fds
= tmp_fds
;
452 tmp_fdes
= talloc_realloc(
453 poll_ev
, poll_ev
->fdes
, struct tevent_fd
*,
455 if (tmp_fdes
== NULL
) {
458 poll_ev
->fdes
= tmp_fdes
;
461 for (fde
= poll_ev
->fresh
; fde
; fde
= next
) {
464 pfd
= &poll_ev
->fds
[poll_ev
->num_fds
];
470 if (fde
->flags
& TEVENT_FD_READ
) {
471 pfd
->events
|= (POLLIN
|POLLHUP
);
473 if (fde
->flags
& TEVENT_FD_WRITE
) {
474 pfd
->events
|= (POLLOUT
);
477 fde
->additional_flags
= poll_ev
->num_fds
;
478 poll_ev
->fdes
[poll_ev
->num_fds
] = fde
;
481 DLIST_REMOVE(poll_ev
->fresh
, fde
);
482 DLIST_ADD(ev
->fd_events
, fde
);
484 poll_ev
->num_fds
+= 1;
490 event loop handling using poll()
492 static int poll_event_loop_poll(struct tevent_context
*ev
,
493 struct timeval
*tvalp
)
495 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
496 ev
->additional_data
, struct poll_event_context
);
500 struct tevent_fd
*fde
= NULL
;
503 if (ev
->signal_events
&& tevent_common_check_signal(ev
)) {
508 timeout
= tvalp
->tv_sec
* 1000;
509 timeout
+= (tvalp
->tv_usec
+ 999) / 1000;
512 poll_event_drain_signal_fd(poll_ev
);
514 if (!poll_event_setup_fresh(ev
, poll_ev
)) {
518 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
519 pollrtn
= poll(poll_ev
->fds
, poll_ev
->num_fds
, timeout
);
521 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
523 if (pollrtn
== -1 && poll_errno
== EINTR
&& ev
->signal_events
) {
524 tevent_common_check_signal(ev
);
528 if (pollrtn
== 0 && tvalp
) {
529 /* we don't care about a possible delay here */
530 tevent_common_loop_timer_delay(ev
);
541 /* at least one file descriptor is ready - check
542 which ones and call the handler, being careful to allow
543 the handler to remove itself when called */
545 for (fde
= ev
->fd_events
; fde
; fde
= fde
->next
) {
546 uint64_t idx
= fde
->additional_flags
;
550 if (idx
== UINT64_MAX
) {
554 pfd
= &poll_ev
->fds
[idx
];
556 if (pfd
->revents
& POLLNVAL
) {
558 * the socket is dead! this should never
559 * happen as the socket should have first been
560 * made readable and that should have removed
561 * the event, so this must be a bug.
563 * We ignore it here to match the epoll
566 tevent_debug(ev
, TEVENT_DEBUG_ERROR
,
567 "POLLNVAL on fde[%p] fd[%d] - disabling\n",
569 poll_ev
->fdes
[idx
] = NULL
;
570 poll_ev
->deleted
= true;
571 DLIST_REMOVE(ev
->fd_events
, fde
);
572 fde
->event_ctx
= NULL
;
576 if (pfd
->revents
& (POLLHUP
|POLLERR
)) {
577 /* If we only wait for TEVENT_FD_WRITE, we
578 should not tell the event handler about it,
579 and remove the writable flag, as we only
580 report errors when waiting for read events
581 to match the select behavior. */
582 if (!(fde
->flags
& TEVENT_FD_READ
)) {
583 TEVENT_FD_NOT_WRITEABLE(fde
);
586 flags
|= TEVENT_FD_READ
;
588 if (pfd
->revents
& POLLIN
) {
589 flags
|= TEVENT_FD_READ
;
591 if (pfd
->revents
& POLLOUT
) {
592 flags
|= TEVENT_FD_WRITE
;
595 * Note that fde->flags could be changed when using
596 * the poll_mt backend together with threads,
597 * that why we need to check pfd->revents and fde->flags
601 DLIST_DEMOTE(ev
->fd_events
, fde
, struct tevent_fd
);
602 fde
->handler(ev
, fde
, flags
, fde
->private_data
);
607 for (i
= 0; i
< poll_ev
->num_fds
; i
++) {
608 if (poll_ev
->fds
[i
].revents
& POLLNVAL
) {
610 * the socket is dead! this should never
611 * happen as the socket should have first been
612 * made readable and that should have removed
613 * the event, so this must be a bug or
614 * a race in the poll_mt usage.
616 fde
= poll_ev
->fdes
[i
];
617 tevent_debug(ev
, TEVENT_DEBUG_WARNING
,
618 "POLLNVAL on dangling fd[%d] fde[%p] - disabling\n",
619 poll_ev
->fds
[i
].fd
, fde
);
620 poll_ev
->fdes
[i
] = NULL
;
621 poll_ev
->deleted
= true;
623 DLIST_REMOVE(ev
->fd_events
, fde
);
624 fde
->event_ctx
= NULL
;
633 do a single event loop using the events defined in ev
635 static int poll_event_loop_once(struct tevent_context
*ev
,
636 const char *location
)
640 if (ev
->signal_events
&&
641 tevent_common_check_signal(ev
)) {
645 if (ev
->immediate_events
&&
646 tevent_common_loop_immediate(ev
)) {
650 tval
= tevent_common_loop_timer_delay(ev
);
651 if (tevent_timeval_is_zero(&tval
)) {
655 return poll_event_loop_poll(ev
, &tval
);
658 static int poll_event_loop_wait(struct tevent_context
*ev
,
659 const char *location
)
661 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
662 ev
->additional_data
, struct poll_event_context
);
665 * loop as long as we have events pending
667 while (ev
->fd_events
||
669 ev
->immediate_events
||
674 ret
= _tevent_loop_once(ev
, location
);
676 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
677 "_tevent_loop_once() failed: %d - %s\n",
678 ret
, strerror(errno
));
683 tevent_debug(ev
, TEVENT_DEBUG_WARNING
,
684 "poll_event_loop_wait() out of events\n");
688 static const struct tevent_ops poll_event_ops
= {
689 .context_init
= poll_event_context_init
,
690 .add_fd
= poll_event_add_fd
,
691 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
692 .get_fd_flags
= tevent_common_fd_get_flags
,
693 .set_fd_flags
= poll_event_set_fd_flags
,
694 .add_timer
= tevent_common_add_timer_v2
,
695 .schedule_immediate
= tevent_common_schedule_immediate
,
696 .add_signal
= tevent_common_add_signal
,
697 .loop_once
= poll_event_loop_once
,
698 .loop_wait
= poll_event_loop_wait
,
701 _PRIVATE_
bool tevent_poll_init(void)
703 return tevent_register_backend("poll", &poll_event_ops
);
706 static const struct tevent_ops poll_event_mt_ops
= {
707 .context_init
= poll_event_context_init_mt
,
708 .add_fd
= poll_event_add_fd
,
709 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
710 .get_fd_flags
= tevent_common_fd_get_flags
,
711 .set_fd_flags
= poll_event_set_fd_flags
,
712 .add_timer
= tevent_common_add_timer_v2
,
713 .schedule_immediate
= poll_event_schedule_immediate
,
714 .add_signal
= tevent_common_add_signal
,
715 .loop_once
= poll_event_loop_once
,
716 .loop_wait
= poll_event_loop_wait
,
719 _PRIVATE_
bool tevent_poll_mt_init(void)
721 return tevent_register_backend("poll_mt", &poll_event_mt_ops
);