/*
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell 2003-2005
   Copyright (C) Stefan Metzmacher 2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_util.h"
#include "tevent_internal.h"
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;

	/*
	 * These two arrays are maintained together.
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread
	 */
	int signal_fd;

	/* information for exiting from the event loop */
	int exit_code;
};
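/*
 * Editorial note (not part of the original source): a sketch of the
 * invariant the code below maintains between the two arrays. For every
 * slot i that carries a tevent fd event (i.e. ignoring the optional
 * signal-pipe slot 0 used by the "poll_mt" backend):
 *
 *	poll_ev->fdes[i] == NULL ||
 *	(poll_ev->fdes[i]->additional_flags == i &&
 *	 poll_ev->fds[i].fd == poll_ev->fdes[i]->fd)
 *
 * fde->additional_flags stores the slot index, which is how
 * poll_event_fd_destructor() and the compaction loop in
 * poll_event_loop_poll() find the matching entries.
 */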
static int poll_event_context_destructor(struct poll_event_context *poll_ev)
{
	struct tevent_fd *fd, *fn;

	for (fd = poll_ev->fresh; fd; fd = fn) {
		fn = fd->next;
		fd->event_ctx = NULL;
		DLIST_REMOVE(poll_ev->fresh, fd);
	}

	if (poll_ev->signal_fd == -1) {
		/*
		 * Non-threaded, no signal pipe
		 */
		return 0;
	}

	close(poll_ev->signal_fd);
	poll_ev->signal_fd = -1;

	if (poll_ev->num_fds == 0) {
		return 0;
	}
	if (poll_ev->fds[0].fd != -1) {
		close(poll_ev->fds[0].fd);
		poll_ev->fds[0].fd = -1;
	}
	return 0;
}
/*
  create a poll_event_context structure.
*/
static int poll_event_context_init(struct tevent_context *ev)
{
	struct poll_event_context *poll_ev;

	/*
	 * we might be called during tevent_re_initialise()
	 * which means we need to free our old additional_data
	 * in order to detach old fd events from the
	 * poll_ev->fresh list
	 */
	TALLOC_FREE(ev->additional_data);

	poll_ev = talloc_zero(ev, struct poll_event_context);
	if (poll_ev == NULL) {
		return -1;
	}
	poll_ev->ev = ev;
	poll_ev->signal_fd = -1;
	ev->additional_data = poll_ev;
	talloc_set_destructor(poll_ev, poll_event_context_destructor);
	return 0;
}
static bool set_nonblock(int fd)
{
	int val;

	val = fcntl(fd, F_GETFL, 0);
	if (val == -1) {
		return false;
	}
	val |= O_NONBLOCK;

	return (fcntl(fd, F_SETFL, val) != -1);
}
static int poll_event_context_init_mt(struct tevent_context *ev)
{
	struct poll_event_context *poll_ev;
	struct pollfd *pfd;
	int fds[2];
	int ret;

	ret = poll_event_context_init(ev);
	if (ret == -1) {
		return ret;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	poll_ev->fds = talloc_zero(poll_ev, struct pollfd);
	if (poll_ev->fds == NULL) {
		return -1;
	}

	ret = pipe(fds);
	if (ret == -1) {
		return -1;
	}
	if (!set_nonblock(fds[0]) || !set_nonblock(fds[1])) {
		close(fds[0]);
		close(fds[1]);
		return -1;
	}

	poll_ev->signal_fd = fds[1];

	pfd = &poll_ev->fds[0];
	pfd->fd = fds[0];
	pfd->events = (POLLIN|POLLHUP);

	poll_ev->num_fds = 1;

	talloc_set_destructor(poll_ev, poll_event_context_destructor);
	return 0;
}
static void poll_event_wake_pollthread(struct poll_event_context *poll_ev)
{
	char c;
	ssize_t ret;

	if (poll_ev->signal_fd == -1) {
		return;
	}
	c = 0;
	do {
		ret = write(poll_ev->signal_fd, &c, sizeof(c));
	} while ((ret == -1) && (errno == EINTR));
}
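/*
 * Editorial note (not part of the original source): this is the classic
 * self-pipe wakeup pattern. Writing a single byte to signal_fd (the write
 * end of the pipe created in poll_event_context_init_mt) makes the read
 * end, which sits in poll_ev->fds[0], become readable, so a thread blocked
 * in poll() returns and can pick up newly added or changed events. The
 * byte is discarded again by poll_event_drain_signal_fd() below.
 */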
static void poll_event_drain_signal_fd(struct poll_event_context *poll_ev)
{
	char buf[16];
	ssize_t ret;
	int fd;

	if (poll_ev->signal_fd == -1) {
		return;
	}
	if (poll_ev->num_fds < 1) {
		return;
	}
	fd = poll_ev->fds[0].fd;

	do {
		ret = read(fd, buf, sizeof(buf));
	} while (ret == sizeof(buf));
}
static int poll_event_fd_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;
	uint64_t del_idx = fde->additional_flags;

	if (ev == NULL) {
		goto done;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	poll_ev->fdes[del_idx] = NULL;
	poll_event_wake_pollthread(poll_ev);
done:
	return tevent_common_fd_destructor(fde);
}
static int poll_fresh_fde_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;

	if (ev == NULL) {
		goto done;
	}
	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	DLIST_REMOVE(poll_ev->fresh, fde);
done:
	return tevent_common_fd_destructor(fde);
}
static void poll_event_schedule_immediate(struct tevent_immediate *im,
					  struct tevent_context *ev,
					  tevent_immediate_handler_t handler,
					  void *private_data,
					  const char *handler_name,
					  const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	tevent_common_schedule_immediate(im, ev, handler, private_data,
					 handler_name, location);
	poll_event_wake_pollthread(poll_ev);
}
/*
  add a fd based event
  return NULL on failure (memory allocation error)
*/
static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev,
					   TALLOC_CTX *mem_ctx,
					   int fd, uint16_t flags,
					   tevent_fd_handler_t handler,
					   void *private_data,
					   const char *handler_name,
					   const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);
	struct tevent_fd *fde;

	if (fd < 0) {
		return NULL;
	}

	fde = talloc(mem_ctx ? mem_ctx : ev, struct tevent_fd);
	if (fde == NULL) {
		return NULL;
	}
	fde->event_ctx = ev;
	fde->fd = fd;
	fde->flags = flags;
	fde->handler = handler;
	fde->close_fn = NULL;
	fde->private_data = private_data;
	fde->handler_name = handler_name;
	fde->location = location;
	fde->additional_flags = UINT64_MAX;
	fde->additional_data = NULL;

	DLIST_ADD(poll_ev->fresh, fde);
	talloc_set_destructor(fde, poll_fresh_fde_destructor);
	poll_event_wake_pollthread(poll_ev);

	/*
	 * poll_event_loop_poll will take care of the rest in
	 * poll_event_setup_fresh
	 */
	return fde;
}
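/*
 * Editorial note (not part of the original source): a minimal sketch of
 * how a caller reaches this backend through the public tevent API,
 * assuming the usual tevent_add_fd()/tevent_fd_set_flags() wrappers and a
 * hypothetical handler named echo_handler:
 *
 *	static void echo_handler(struct tevent_context *ev,
 *				 struct tevent_fd *fde,
 *				 uint16_t flags, void *private_data)
 *	{
 *		// called with TEVENT_FD_READ and/or TEVENT_FD_WRITE set
 *	}
 *
 *	struct tevent_fd *fde = tevent_add_fd(ev, ev, sock, TEVENT_FD_READ,
 *					      echo_handler, NULL);
 *	if (fde == NULL) { ... error ... }
 *	tevent_fd_set_flags(fde, TEVENT_FD_READ | TEVENT_FD_WRITE);
 *
 * The new fde first lands on poll_ev->fresh; poll_event_setup_fresh()
 * moves it into the pollfd array on the next loop iteration.
 */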
/*
  set the fd event flags
*/
static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;
	uint64_t idx = fde->additional_flags;
	uint16_t pollflags;

	if (ev == NULL) {
		return;
	}
	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	fde->flags = flags;

	if (idx == UINT64_MAX) {
		/*
		 * poll_event_setup_fresh not yet called after this fde was
		 * added. We don't have to do anything to transfer the changed
		 * flags to the array passed to poll(2)
		 */
		return;
	}

	pollflags = 0;
	if (flags & TEVENT_FD_READ) {
		pollflags |= (POLLIN|POLLHUP);
	}
	if (flags & TEVENT_FD_WRITE) {
		pollflags |= (POLLOUT);
	}
	poll_ev->fds[idx].events = pollflags;

	poll_event_wake_pollthread(poll_ev);
}
static bool poll_event_setup_fresh(struct tevent_context *ev,
				   struct poll_event_context *poll_ev)
{
	struct tevent_fd *fde, *next;
	unsigned num_fresh, num_fds;

	if (poll_ev->fresh == NULL) {
		return true;
	}

	num_fresh = 0;
	for (fde = poll_ev->fresh; fde; fde = fde->next) {
		num_fresh += 1;
	}
	num_fds = poll_ev->num_fds + num_fresh;

	/*
	 * We check the length of fdes here. It is the last one
	 * enlarged, so if the realloc for poll_fd->fdes fails,
	 * poll_fd->fds will have at least the size of poll_fd->fdes
	 */
	if (num_fds >= talloc_array_length(poll_ev->fdes)) {
		struct pollfd *tmp_fds;
		struct tevent_fd **tmp_fdes;
		unsigned array_length;

		array_length = (num_fds + 15) & ~15; /* round up to 16 */

		tmp_fds = talloc_realloc(
			poll_ev, poll_ev->fds, struct pollfd, array_length);
		if (tmp_fds == NULL) {
			return false;
		}
		poll_ev->fds = tmp_fds;

		tmp_fdes = talloc_realloc(
			poll_ev, poll_ev->fdes, struct tevent_fd *,
			array_length);
		if (tmp_fdes == NULL) {
			return false;
		}
		poll_ev->fdes = tmp_fdes;
	}

	for (fde = poll_ev->fresh; fde; fde = next) {
		struct pollfd *pfd;

		pfd = &poll_ev->fds[poll_ev->num_fds];

		pfd->fd = fde->fd;
		pfd->events = 0;
		pfd->revents = 0;

		if (fde->flags & TEVENT_FD_READ) {
			pfd->events |= (POLLIN|POLLHUP);
		}
		if (fde->flags & TEVENT_FD_WRITE) {
			pfd->events |= (POLLOUT);
		}

		fde->additional_flags = poll_ev->num_fds;
		poll_ev->fdes[poll_ev->num_fds] = fde;

		next = fde->next;
		DLIST_REMOVE(poll_ev->fresh, fde);
		DLIST_ADD(ev->fd_events, fde);

		talloc_set_destructor(fde, poll_event_fd_destructor);

		poll_ev->num_fds += 1;
	}
	return true;
}
/*
  event loop handling using poll()
*/
static int poll_event_loop_poll(struct tevent_context *ev,
				struct timeval *tvalp)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);
	int pollrtn;
	int timeout = -1;
	unsigned first_fd;
	unsigned i;

	if (ev->signal_events && tevent_common_check_signal(ev)) {
		return 0;
	}

	if (tvalp != NULL) {
		timeout = tvalp->tv_sec * 1000;
		timeout += (tvalp->tv_usec + 999) / 1000;
	}

	poll_event_drain_signal_fd(poll_ev);

	if (!poll_event_setup_fresh(ev, poll_ev)) {
		return -1;
	}

	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
	pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

	if (pollrtn == -1 && errno == EINTR && ev->signal_events) {
		tevent_common_check_signal(ev);
		return 0;
	}

	if (pollrtn == 0 && tvalp) {
		/* we don't care about a possible delay here */
		tevent_common_loop_timer_delay(ev);
		return 0;
	}

	if (pollrtn <= 0) {
		/*
		 * No fd's ready
		 */
		return 0;
	}

	first_fd = (poll_ev->signal_fd != -1) ? 1 : 0;

	/* at least one file descriptor is ready - check
	   which ones and call the handler, being careful to allow
	   the handler to remove itself when called */

	for (i=first_fd; i<poll_ev->num_fds; i++) {
		struct pollfd *pfd;
		struct tevent_fd *fde;
		uint16_t flags = 0;

		fde = poll_ev->fdes[i];
		if (fde == NULL) {
			/*
			 * This fde was talloc_free()'ed. Delete it
			 * from the arrays
			 */
			poll_ev->num_fds -= 1;
			poll_ev->fds[i] = poll_ev->fds[poll_ev->num_fds];
			poll_ev->fdes[i] = poll_ev->fdes[poll_ev->num_fds];
			if (poll_ev->fdes[i] != NULL) {
				poll_ev->fdes[i]->additional_flags = i;
			}
			continue;
		}

		pfd = &poll_ev->fds[i];

		if (pfd->revents & (POLLHUP|POLLERR)) {
			/* If we only wait for TEVENT_FD_WRITE, we
			   should not tell the event handler about it,
			   and remove the writable flag, as we only
			   report errors when waiting for read events
			   to match the select behavior. */
			if (!(fde->flags & TEVENT_FD_READ)) {
				TEVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLIN) {
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= TEVENT_FD_WRITE;
		}
		if (flags != 0) {
			fde->handler(ev, fde, flags, fde->private_data);
			break;
		}
	}

	return 0;
}
/*
  do a single event loop using the events defined in ev
*/
static int poll_event_loop_once(struct tevent_context *ev,
				const char *location)
{
	struct timeval tval;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return 0;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return 0;
	}

	tval = tevent_common_loop_timer_delay(ev);
	if (tevent_timeval_is_zero(&tval)) {
		return 0;
	}

	return poll_event_loop_poll(ev, &tval);
}
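/*
 * Editorial note (not part of the original source): callers normally do
 * not invoke this function directly; it is reached through the public
 * tevent_loop_once()/tevent_loop_wait() wrappers, roughly:
 *
 *	while (!done) {
 *		if (tevent_loop_once(ev) != 0) {
 *			// handle error
 *			break;
 *		}
 *	}
 *
 * where "done" is an application-defined flag (a hypothetical name, not
 * part of tevent).
 */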
static int poll_event_loop_wait(struct tevent_context *ev,
				const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	/*
	 * loop as long as we have events pending
	 */
	while (ev->fd_events ||
	       ev->timer_events ||
	       ev->immediate_events ||
	       poll_ev->fresh) {
		int ret;

		ret = _tevent_loop_once(ev, location);
		if (ret != 0) {
			tevent_debug(ev, TEVENT_DEBUG_FATAL,
				     "_tevent_loop_once() failed: %d - %s\n",
				     ret, strerror(errno));
			return ret;
		}
	}

	tevent_debug(ev, TEVENT_DEBUG_WARNING,
		     "poll_event_loop_wait() out of events\n");
	return 0;
}
static const struct tevent_ops poll_event_ops = {
	.context_init		= poll_event_context_init,
	.add_fd			= poll_event_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= poll_event_set_fd_flags,
	.add_timer		= tevent_common_add_timer,
	.schedule_immediate	= tevent_common_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= poll_event_loop_once,
	.loop_wait		= poll_event_loop_wait,
};
_PRIVATE_ bool tevent_poll_init(void)
{
	return tevent_register_backend("poll", &poll_event_ops);
}
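/*
 * Editorial note (not part of the original source): a sketch of how an
 * application selects this backend explicitly, assuming the public
 * tevent_set_default_backend()/tevent_context_init_byname() API:
 *
 *	tevent_set_default_backend("poll");
 *	struct tevent_context *ev = tevent_context_init(mem_ctx);
 *
 * or, equivalently:
 *
 *	struct tevent_context *ev =
 *		tevent_context_init_byname(mem_ctx, "poll");
 *
 * Without this, tevent picks its compiled-in default backend (typically
 * an epoll-based one on Linux).
 */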
static const struct tevent_ops poll_event_mt_ops = {
	.context_init		= poll_event_context_init_mt,
	.add_fd			= poll_event_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= poll_event_set_fd_flags,
	.add_timer		= tevent_common_add_timer,
	.schedule_immediate	= poll_event_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= poll_event_loop_once,
	.loop_wait		= poll_event_loop_wait,
};
_PRIVATE_ bool tevent_poll_mt_init(void)
{
	return tevent_register_backend("poll_mt", &poll_event_mt_ops);
}
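/*
 * Editorial note (not part of the original source): the difference between
 * "poll" and "poll_mt" is the signal pipe set up in
 * poll_event_context_init_mt and the schedule_immediate hook: with
 * "poll_mt", scheduling an immediate event wakes a thread that is blocked
 * in poll(), so the new event is picked up without waiting for the current
 * timeout to expire. A sketch, with a hypothetical handler name:
 *
 *	static void my_im_handler(struct tevent_context *ev,
 *				  struct tevent_immediate *im,
 *				  void *private_data)
 *	{
 *		// runs on the next loop iteration
 *	}
 *
 *	struct tevent_immediate *im = tevent_create_immediate(mem_ctx);
 *	tevent_schedule_immediate(im, ev, my_im_handler, NULL);
 */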