/*
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell 2003-2005
   Copyright (C) Stefan Metzmacher 2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
26 #include "system/filesys.h"
27 #include "system/select.h"
29 #include "tevent_util.h"
30 #include "tevent_internal.h"
/*
 * Per-tevent-context private state for the poll() backend, hung off
 * ev->additional_data.
 */
struct poll_event_context {
        /* a pointer back to the generic event_context */
        struct tevent_context *ev;

        /*
         * A DLIST for fresh fde's added by poll_event_add_fd but not
         * picked up yet by poll_event_loop_once
         */
        struct tevent_fd *fresh;

        /*
         * These two arrays are maintained together: fds[i] is the
         * pollfd handed to poll(2) for the tevent_fd in fdes[i].
         * num_fds is the number of valid entries in both.
         */
        struct pollfd *fds;
        struct tevent_fd **fdes;
        unsigned num_fds;

        /*
         * Set when an fde was talloc_free()'ed while registered in
         * fdes[]; tells poll_event_setup_fresh to compact the arrays.
         */
        bool deleted;

        /*
         * Signal fd to wake the poll() thread (write side of a pipe,
         * -1 for the single-threaded "poll" backend)
         */
        int signal_fd;

        /* information for exiting from the event loop */
        int exit_code;
};
59 static int poll_event_context_destructor(struct poll_event_context
*poll_ev
)
61 struct tevent_fd
*fd
, *fn
;
63 for (fd
= poll_ev
->fresh
; fd
; fd
= fn
) {
66 DLIST_REMOVE(poll_ev
->fresh
, fd
);
69 if (poll_ev
->signal_fd
== -1) {
71 * Non-threaded, no signal pipe
76 close(poll_ev
->signal_fd
);
77 poll_ev
->signal_fd
= -1;
79 if (poll_ev
->num_fds
== 0) {
82 if (poll_ev
->fds
[0].fd
!= -1) {
83 close(poll_ev
->fds
[0].fd
);
84 poll_ev
->fds
[0].fd
= -1;
90 create a poll_event_context structure.
92 static int poll_event_context_init(struct tevent_context
*ev
)
94 struct poll_event_context
*poll_ev
;
97 * we might be called during tevent_re_initialise()
98 * which means we need to free our old additional_data
99 * in order to detach old fd events from the
100 * poll_ev->fresh list
102 TALLOC_FREE(ev
->additional_data
);
104 poll_ev
= talloc_zero(ev
, struct poll_event_context
);
105 if (poll_ev
== NULL
) {
109 poll_ev
->signal_fd
= -1;
110 ev
->additional_data
= poll_ev
;
111 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
/*
 * Put fd into non-blocking mode, preserving its other file status
 * flags.  Returns false if either fcntl(2) call fails.
 */
static bool set_nonblock(int fd)
{
        int val;

        val = fcntl(fd, F_GETFL, 0);
        if (val == -1) {
                return false;
        }
        val |= O_NONBLOCK;

        return (fcntl(fd, F_SETFL, val) != -1);
}
128 static int poll_event_context_init_mt(struct tevent_context
*ev
)
130 struct poll_event_context
*poll_ev
;
135 ret
= poll_event_context_init(ev
);
140 poll_ev
= talloc_get_type_abort(
141 ev
->additional_data
, struct poll_event_context
);
143 poll_ev
->fds
= talloc_zero(poll_ev
, struct pollfd
);
144 if (poll_ev
->fds
== NULL
) {
153 if (!set_nonblock(fds
[0]) || !set_nonblock(fds
[1])) {
159 poll_ev
->signal_fd
= fds
[1];
161 pfd
= &poll_ev
->fds
[0];
163 pfd
->events
= (POLLIN
|POLLHUP
);
165 poll_ev
->num_fds
= 1;
167 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
172 static void poll_event_wake_pollthread(struct poll_event_context
*poll_ev
)
177 if (poll_ev
->signal_fd
== -1) {
182 ret
= write(poll_ev
->signal_fd
, &c
, sizeof(c
));
183 } while ((ret
== -1) && (errno
== EINTR
));
186 static void poll_event_drain_signal_fd(struct poll_event_context
*poll_ev
)
192 if (poll_ev
->signal_fd
== -1) {
196 if (poll_ev
->num_fds
< 1) {
199 fd
= poll_ev
->fds
[0].fd
;
202 ret
= read(fd
, buf
, sizeof(buf
));
203 } while (ret
== sizeof(buf
));
209 static int poll_event_fd_destructor(struct tevent_fd
*fde
)
211 struct tevent_context
*ev
= fde
->event_ctx
;
212 struct poll_event_context
*poll_ev
;
213 uint64_t del_idx
= fde
->additional_flags
;
219 poll_ev
= talloc_get_type_abort(
220 ev
->additional_data
, struct poll_event_context
);
222 poll_ev
->fdes
[del_idx
] = NULL
;
223 poll_ev
->deleted
= true;
224 poll_event_wake_pollthread(poll_ev
);
226 return tevent_common_fd_destructor(fde
);
229 static int poll_fresh_fde_destructor(struct tevent_fd
*fde
)
231 struct tevent_context
*ev
= fde
->event_ctx
;
232 struct poll_event_context
*poll_ev
;
237 poll_ev
= talloc_get_type_abort(
238 ev
->additional_data
, struct poll_event_context
);
240 DLIST_REMOVE(poll_ev
->fresh
, fde
);
242 return tevent_common_fd_destructor(fde
);
245 static void poll_event_schedule_immediate(struct tevent_immediate
*im
,
246 struct tevent_context
*ev
,
247 tevent_immediate_handler_t handler
,
249 const char *handler_name
,
250 const char *location
)
252 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
253 ev
->additional_data
, struct poll_event_context
);
255 tevent_common_schedule_immediate(im
, ev
, handler
, private_data
,
256 handler_name
, location
);
257 poll_event_wake_pollthread(poll_ev
);
261 Private function called by "standard" backend fallback.
262 Note this only allows fallback to "poll" backend, not "poll-mt".
264 _PRIVATE_
void tevent_poll_event_add_fd_internal(struct tevent_context
*ev
,
265 struct tevent_fd
*fde
)
267 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
268 ev
->additional_data
, struct poll_event_context
);
270 fde
->additional_flags
= UINT64_MAX
;
271 fde
->additional_data
= NULL
;
272 DLIST_ADD(poll_ev
->fresh
, fde
);
273 talloc_set_destructor(fde
, poll_fresh_fde_destructor
);
278 return NULL on failure (memory allocation error)
280 static struct tevent_fd
*poll_event_add_fd(struct tevent_context
*ev
,
282 int fd
, uint16_t flags
,
283 tevent_fd_handler_t handler
,
285 const char *handler_name
,
286 const char *location
)
288 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
289 ev
->additional_data
, struct poll_event_context
);
290 struct tevent_fd
*fde
;
296 fde
= talloc(mem_ctx
? mem_ctx
: ev
, struct tevent_fd
);
303 fde
->handler
= handler
;
304 fde
->close_fn
= NULL
;
305 fde
->private_data
= private_data
;
306 fde
->handler_name
= handler_name
;
307 fde
->location
= location
;
308 fde
->additional_flags
= UINT64_MAX
;
309 fde
->additional_data
= NULL
;
311 DLIST_ADD(poll_ev
->fresh
, fde
);
312 talloc_set_destructor(fde
, poll_fresh_fde_destructor
);
313 poll_event_wake_pollthread(poll_ev
);
316 * poll_event_loop_poll will take care of the rest in
317 * poll_event_setup_fresh
323 set the fd event flags
325 static void poll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
327 struct tevent_context
*ev
= fde
->event_ctx
;
328 struct poll_event_context
*poll_ev
;
329 uint64_t idx
= fde
->additional_flags
;
335 poll_ev
= talloc_get_type_abort(
336 ev
->additional_data
, struct poll_event_context
);
340 if (idx
== UINT64_MAX
) {
342 * poll_event_setup_fresh not yet called after this fde was
343 * added. We don't have to do anything to transfer the changed
344 * flags to the array passed to poll(2)
351 if (flags
& TEVENT_FD_READ
) {
352 pollflags
|= (POLLIN
|POLLHUP
);
354 if (flags
& TEVENT_FD_WRITE
) {
355 pollflags
|= (POLLOUT
);
357 poll_ev
->fds
[idx
].events
= pollflags
;
359 poll_event_wake_pollthread(poll_ev
);
362 static bool poll_event_setup_fresh(struct tevent_context
*ev
,
363 struct poll_event_context
*poll_ev
)
365 struct tevent_fd
*fde
, *next
;
366 unsigned num_fresh
, num_fds
;
368 if (poll_ev
->deleted
) {
369 unsigned first_fd
= (poll_ev
->signal_fd
!= -1) ? 1 : 0;
372 for (i
=first_fd
; i
< poll_ev
->num_fds
;) {
373 fde
= poll_ev
->fdes
[i
];
380 * This fde was talloc_free()'ed. Delete it
383 poll_ev
->num_fds
-= 1;
384 if (poll_ev
->num_fds
== i
) {
387 poll_ev
->fds
[i
] = poll_ev
->fds
[poll_ev
->num_fds
];
388 poll_ev
->fdes
[i
] = poll_ev
->fdes
[poll_ev
->num_fds
];
389 if (poll_ev
->fdes
[i
] != NULL
) {
390 poll_ev
->fdes
[i
]->additional_flags
= i
;
394 poll_ev
->deleted
= false;
396 if (poll_ev
->fresh
== NULL
) {
401 for (fde
= poll_ev
->fresh
; fde
; fde
= fde
->next
) {
404 num_fds
= poll_ev
->num_fds
+ num_fresh
;
407 * We check the length of fdes here. It is the last one
408 * enlarged, so if the realloc for poll_fd->fdes fails,
409 * poll_fd->fds will have at least the size of poll_fd->fdes
412 if (num_fds
>= talloc_array_length(poll_ev
->fdes
)) {
413 struct pollfd
*tmp_fds
;
414 struct tevent_fd
**tmp_fdes
;
415 unsigned array_length
;
417 array_length
= (num_fds
+ 15) & ~15; /* round up to 16 */
419 tmp_fds
= talloc_realloc(
420 poll_ev
, poll_ev
->fds
, struct pollfd
, array_length
);
421 if (tmp_fds
== NULL
) {
424 poll_ev
->fds
= tmp_fds
;
426 tmp_fdes
= talloc_realloc(
427 poll_ev
, poll_ev
->fdes
, struct tevent_fd
*,
429 if (tmp_fdes
== NULL
) {
432 poll_ev
->fdes
= tmp_fdes
;
435 for (fde
= poll_ev
->fresh
; fde
; fde
= next
) {
438 pfd
= &poll_ev
->fds
[poll_ev
->num_fds
];
444 if (fde
->flags
& TEVENT_FD_READ
) {
445 pfd
->events
|= (POLLIN
|POLLHUP
);
447 if (fde
->flags
& TEVENT_FD_WRITE
) {
448 pfd
->events
|= (POLLOUT
);
451 fde
->additional_flags
= poll_ev
->num_fds
;
452 poll_ev
->fdes
[poll_ev
->num_fds
] = fde
;
455 DLIST_REMOVE(poll_ev
->fresh
, fde
);
456 DLIST_ADD(ev
->fd_events
, fde
);
458 talloc_set_destructor(fde
, poll_event_fd_destructor
);
460 poll_ev
->num_fds
+= 1;
466 event loop handling using poll()
468 static int poll_event_loop_poll(struct tevent_context
*ev
,
469 struct timeval
*tvalp
)
471 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
472 ev
->additional_data
, struct poll_event_context
);
479 if (ev
->signal_events
&& tevent_common_check_signal(ev
)) {
484 timeout
= tvalp
->tv_sec
* 1000;
485 timeout
+= (tvalp
->tv_usec
+ 999) / 1000;
488 poll_event_drain_signal_fd(poll_ev
);
490 if (!poll_event_setup_fresh(ev
, poll_ev
)) {
494 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
495 pollrtn
= poll(poll_ev
->fds
, poll_ev
->num_fds
, timeout
);
497 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
499 if (pollrtn
== -1 && poll_errno
== EINTR
&& ev
->signal_events
) {
500 tevent_common_check_signal(ev
);
504 if (pollrtn
== 0 && tvalp
) {
505 /* we don't care about a possible delay here */
506 tevent_common_loop_timer_delay(ev
);
517 first_fd
= (poll_ev
->signal_fd
!= -1) ? 1 : 0;
519 /* at least one file descriptor is ready - check
520 which ones and call the handler, being careful to allow
521 the handler to remove itself when called */
523 for (i
=first_fd
; i
<poll_ev
->num_fds
; i
= next_i
) {
525 struct tevent_fd
*fde
;
530 fde
= poll_ev
->fdes
[i
];
533 * This fde was talloc_free()'ed. Delete it
536 poll_ev
->num_fds
-= 1;
537 if (poll_ev
->num_fds
== i
) {
540 poll_ev
->fds
[i
] = poll_ev
->fds
[poll_ev
->num_fds
];
541 poll_ev
->fdes
[i
] = poll_ev
->fdes
[poll_ev
->num_fds
];
542 if (poll_ev
->fdes
[i
] != NULL
) {
543 poll_ev
->fdes
[i
]->additional_flags
= i
;
545 /* we have to reprocess position 'i' */
550 pfd
= &poll_ev
->fds
[i
];
552 if (pfd
->revents
& (POLLHUP
|POLLERR
)) {
553 /* If we only wait for TEVENT_FD_WRITE, we
554 should not tell the event handler about it,
555 and remove the writable flag, as we only
556 report errors when waiting for read events
557 to match the select behavior. */
558 if (!(fde
->flags
& TEVENT_FD_READ
)) {
559 TEVENT_FD_NOT_WRITEABLE(fde
);
562 flags
|= TEVENT_FD_READ
;
564 if (pfd
->revents
& POLLIN
) {
565 flags
|= TEVENT_FD_READ
;
567 if (pfd
->revents
& POLLOUT
) {
568 flags
|= TEVENT_FD_WRITE
;
571 fde
->handler(ev
, fde
, flags
, fde
->private_data
);
580 do a single event loop using the events defined in ev
582 static int poll_event_loop_once(struct tevent_context
*ev
,
583 const char *location
)
587 if (ev
->signal_events
&&
588 tevent_common_check_signal(ev
)) {
592 if (ev
->immediate_events
&&
593 tevent_common_loop_immediate(ev
)) {
597 tval
= tevent_common_loop_timer_delay(ev
);
598 if (tevent_timeval_is_zero(&tval
)) {
602 return poll_event_loop_poll(ev
, &tval
);
605 static int poll_event_loop_wait(struct tevent_context
*ev
,
606 const char *location
)
608 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
609 ev
->additional_data
, struct poll_event_context
);
612 * loop as long as we have events pending
614 while (ev
->fd_events
||
616 ev
->immediate_events
||
620 ret
= _tevent_loop_once(ev
, location
);
622 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
623 "_tevent_loop_once() failed: %d - %s\n",
624 ret
, strerror(errno
));
629 tevent_debug(ev
, TEVENT_DEBUG_WARNING
,
630 "poll_event_loop_wait() out of events\n");
634 static const struct tevent_ops poll_event_ops
= {
635 .context_init
= poll_event_context_init
,
636 .add_fd
= poll_event_add_fd
,
637 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
638 .get_fd_flags
= tevent_common_fd_get_flags
,
639 .set_fd_flags
= poll_event_set_fd_flags
,
640 .add_timer
= tevent_common_add_timer
,
641 .schedule_immediate
= tevent_common_schedule_immediate
,
642 .add_signal
= tevent_common_add_signal
,
643 .loop_once
= poll_event_loop_once
,
644 .loop_wait
= poll_event_loop_wait
,
647 _PRIVATE_
bool tevent_poll_init(void)
649 return tevent_register_backend("poll", &poll_event_ops
);
652 static const struct tevent_ops poll_event_mt_ops
= {
653 .context_init
= poll_event_context_init_mt
,
654 .add_fd
= poll_event_add_fd
,
655 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
656 .get_fd_flags
= tevent_common_fd_get_flags
,
657 .set_fd_flags
= poll_event_set_fd_flags
,
658 .add_timer
= tevent_common_add_timer
,
659 .schedule_immediate
= poll_event_schedule_immediate
,
660 .add_signal
= tevent_common_add_signal
,
661 .loop_once
= poll_event_loop_once
,
662 .loop_wait
= poll_event_loop_wait
,
665 _PRIVATE_
bool tevent_poll_mt_init(void)
667 return tevent_register_backend("poll_mt", &poll_event_mt_ops
);