tevent: don't skip a fd event if the previous one was deleted during poll()
[Samba.git] / lib / tevent / tevent_poll.c
blob: 68885e94c0feb09f8c37e00dc096155ea95e0656
1 /*
2 Unix SMB/CIFS implementation.
3 main select loop and event handling
4 Copyright (C) Andrew Tridgell 2003-2005
5 Copyright (C) Stefan Metzmacher 2005-2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
9 ** under the LGPL
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
25 #include "replace.h"
26 #include "system/filesys.h"
27 #include "system/select.h"
28 #include "tevent.h"
29 #include "tevent_util.h"
30 #include "tevent_internal.h"
/*
 * Per-tevent-context private state for the poll() backend, stored in
 * ev->additional_data.
 */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;

	/*
	 * These two arrays are maintained together: fds[i] is the
	 * pollfd passed to poll(2) for the tevent_fd in fdes[i], and
	 * each fde stores its own index i in fde->additional_flags.
	 * fdes[i] == NULL marks a slot whose fde was freed; the slot
	 * is compacted lazily in poll_event_loop_poll().
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread; -1 in the
	 * single-threaded "poll" backend. The read end of the pipe
	 * lives in fds[0] (see poll_event_context_init_mt).
	 */
	int signal_fd;

	/* information for exiting from the event loop */
	int exit_code;
};
/*
 * talloc destructor for the poll_event_context: detach any fde's that
 * were added but never picked up into the arrays, then close the
 * wakeup pipe (write end first, then the read end kept in fds[0]).
 */
static int poll_event_context_destructor(struct poll_event_context *poll_ev)
{
	struct tevent_fd *fd, *fn;

	/*
	 * Clear event_ctx on every fresh fde so their own destructors
	 * won't dereference this context after it is gone.
	 */
	for (fd = poll_ev->fresh; fd; fd = fn) {
		fn = fd->next;
		fd->event_ctx = NULL;
		DLIST_REMOVE(poll_ev->fresh, fd);
	}

	if (poll_ev->signal_fd == -1) {
		/*
		 * Non-threaded, no signal pipe
		 */
		return 0;
	}

	close(poll_ev->signal_fd);
	poll_ev->signal_fd = -1;

	if (poll_ev->num_fds == 0) {
		return 0;
	}
	/* fds[0] holds the read end of the wakeup pipe in the mt setup */
	if (poll_ev->fds[0].fd != -1) {
		close(poll_ev->fds[0].fd);
		poll_ev->fds[0].fd = -1;
	}
	return 0;
}
/*
  create a poll_event_context structure.
  Returns 0 on success, -1 on allocation failure.
*/
static int poll_event_context_init(struct tevent_context *ev)
{
	struct poll_event_context *poll_ev;

	/*
	 * we might be called during tevent_re_initialise()
	 * which means we need to free our old additional_data
	 * in order to detach old fd events from the
	 * poll_ev->fresh list
	 */
	TALLOC_FREE(ev->additional_data);

	poll_ev = talloc_zero(ev, struct poll_event_context);
	if (poll_ev == NULL) {
		return -1;
	}
	poll_ev->ev = ev;
	/* -1 == no wakeup pipe; set up later by the mt variant */
	poll_ev->signal_fd = -1;
	ev->additional_data = poll_ev;
	talloc_set_destructor(poll_ev, poll_event_context_destructor);
	return 0;
}
/*
 * Add O_NONBLOCK to an fd's file status flags.
 * Returns false if either fcntl(2) call fails.
 */
static bool set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL, 0);

	if (flags == -1) {
		return false;
	}

	return fcntl(fd, F_SETFL, flags | O_NONBLOCK) != -1;
}
/*
 * Create a poll_event_context for the "poll_mt" backend: like
 * poll_event_context_init() plus a non-blocking self-pipe used to
 * wake a thread blocked in poll(2). The pipe's read end becomes
 * fds[0]; the write end is kept in poll_ev->signal_fd.
 */
static int poll_event_context_init_mt(struct tevent_context *ev)
{
	struct poll_event_context *poll_ev;
	struct pollfd *pfd;
	int fds[2];
	int ret;

	ret = poll_event_context_init(ev);
	if (ret == -1) {
		return ret;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	/* slot 0 is reserved for the wakeup pipe's read end */
	poll_ev->fds = talloc_zero(poll_ev, struct pollfd);
	if (poll_ev->fds == NULL) {
		return -1;
	}

	ret = pipe(fds);
	if (ret == -1) {
		return -1;
	}

	if (!set_nonblock(fds[0]) || !set_nonblock(fds[1])) {
		close(fds[0]);
		close(fds[1]);
		return -1;
	}

	poll_ev->signal_fd = fds[1];

	pfd = &poll_ev->fds[0];
	pfd->fd = fds[0];
	pfd->events = (POLLIN|POLLHUP);

	poll_ev->num_fds = 1;

	/*
	 * Re-set the destructor (already installed by
	 * poll_event_context_init); it closes both pipe ends on
	 * teardown.
	 */
	talloc_set_destructor(poll_ev, poll_event_context_destructor);

	return 0;
}
171 static void poll_event_wake_pollthread(struct poll_event_context *poll_ev)
173 char c;
174 ssize_t ret;
176 if (poll_ev->signal_fd == -1) {
177 return;
179 c = 0;
180 do {
181 ret = write(poll_ev->signal_fd, &c, sizeof(c));
182 } while ((ret == -1) && (errno == EINTR));
185 static void poll_event_drain_signal_fd(struct poll_event_context *poll_ev)
187 char buf[16];
188 ssize_t ret;
189 int fd;
191 if (poll_ev->signal_fd == -1) {
192 return;
195 if (poll_ev->num_fds < 1) {
196 return;
198 fd = poll_ev->fds[0].fd;
200 do {
201 ret = read(fd, buf, sizeof(buf));
202 } while (ret == sizeof(buf));
/*
  destroy an fd_event

  Installed once the fde has been moved from the "fresh" list into the
  fds/fdes arrays. Marks the fde's array slot as deleted (NULL); the
  slot itself is compacted later by poll_event_loop_poll().
*/
static int poll_event_fd_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;
	/* additional_flags holds this fde's index into fds/fdes */
	uint64_t del_idx = fde->additional_flags;

	if (ev == NULL) {
		/* already detached from its context */
		goto done;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	poll_ev->fdes[del_idx] = NULL;
	/* make a potentially poll()ing thread re-read the arrays */
	poll_event_wake_pollthread(poll_ev);
done:
	return tevent_common_fd_destructor(fde);
}
/*
 * Destructor for an fde that is still on the "fresh" list, i.e. added
 * via poll_event_add_fd but not yet transferred into the fds/fdes
 * arrays by poll_event_setup_fresh.
 */
static int poll_fresh_fde_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;

	if (ev == NULL) {
		/* already detached (e.g. context destroyed first) */
		goto done;
	}
	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	DLIST_REMOVE(poll_ev->fresh, fde);
done:
	return tevent_common_fd_destructor(fde);
}
/*
 * schedule_immediate hook for the "poll_mt" backend: queue the
 * immediate via the common code, then wake the poll() thread so it
 * notices the new event without waiting for its timeout.
 */
static void poll_event_schedule_immediate(struct tevent_immediate *im,
					  struct tevent_context *ev,
					  tevent_immediate_handler_t handler,
					  void *private_data,
					  const char *handler_name,
					  const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	tevent_common_schedule_immediate(im, ev, handler, private_data,
					 handler_name, location);
	poll_event_wake_pollthread(poll_ev);
}
/*
  Private function called by "standard" backend fallback.
  Note this only allows fallback to "poll" backend, not "poll-mt".

  Puts an existing fde onto this backend's "fresh" list so the next
  poll_event_setup_fresh() picks it up.
*/
_PRIVATE_ void tevent_poll_event_add_fd_internal(struct tevent_context *ev,
						 struct tevent_fd *fde)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	/* UINT64_MAX == "not yet in the fds/fdes arrays" */
	fde->additional_flags = UINT64_MAX;
	fde->additional_data = NULL;
	DLIST_ADD(poll_ev->fresh, fde);
	talloc_set_destructor(fde, poll_fresh_fde_destructor);
}
/*
  add a fd based event
  return NULL on failure (memory allocation error)

  The new fde is only placed on the "fresh" list here; it enters the
  pollfd arrays the next time poll_event_setup_fresh() runs.
*/
static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev,
					   TALLOC_CTX *mem_ctx,
					   int fd, uint16_t flags,
					   tevent_fd_handler_t handler,
					   void *private_data,
					   const char *handler_name,
					   const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);
	struct tevent_fd *fde;

	if (fd < 0) {
		return NULL;
	}

	fde = talloc(mem_ctx ? mem_ctx : ev, struct tevent_fd);
	if (fde == NULL) {
		return NULL;
	}
	fde->event_ctx = ev;
	fde->fd = fd;
	fde->flags = flags;
	fde->handler = handler;
	fde->close_fn = NULL;
	fde->private_data = private_data;
	fde->handler_name = handler_name;
	fde->location = location;
	/* UINT64_MAX == "not yet in the fds/fdes arrays" */
	fde->additional_flags = UINT64_MAX;
	fde->additional_data = NULL;

	DLIST_ADD(poll_ev->fresh, fde);
	talloc_set_destructor(fde, poll_fresh_fde_destructor);
	/* wake a blocked poll() thread so it sees the new fde */
	poll_event_wake_pollthread(poll_ev);

	/*
	 * poll_event_loop_poll will take care of the rest in
	 * poll_event_setup_fresh
	 */
	return fde;
}
/*
  set the fd event flags

  Updates fde->flags and, if the fde already has a slot in the pollfd
  array, rewrites that slot's events mask and wakes the poll thread.
*/
static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;
	/* fde's index into fds/fdes, or UINT64_MAX if still "fresh" */
	uint64_t idx = fde->additional_flags;
	uint16_t pollflags;

	if (ev == NULL) {
		return;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	fde->flags = flags;

	if (idx == UINT64_MAX) {
		/*
		 * poll_event_setup_fresh not yet called after this fde was
		 * added. We don't have to do anything to transfer the changed
		 * flags to the array passed to poll(2)
		 */
		return;
	}

	pollflags = 0;

	if (flags & TEVENT_FD_READ) {
		pollflags |= (POLLIN|POLLHUP);
	}
	if (flags & TEVENT_FD_WRITE) {
		pollflags |= (POLLOUT);
	}
	poll_ev->fds[idx].events = pollflags;

	poll_event_wake_pollthread(poll_ev);
}
/*
 * Move all fde's from the "fresh" list into the fds/fdes arrays,
 * growing both arrays (in lock-step, rounded up to multiples of 16)
 * as needed. Returns false only on allocation failure; in that case
 * the fresh list is left intact for a later retry.
 */
static bool poll_event_setup_fresh(struct tevent_context *ev,
				   struct poll_event_context *poll_ev)
{
	struct tevent_fd *fde, *next;
	unsigned num_fresh, num_fds;

	if (poll_ev->fresh == NULL) {
		return true;
	}

	num_fresh = 0;
	for (fde = poll_ev->fresh; fde; fde = fde->next) {
		num_fresh += 1;
	}
	num_fds = poll_ev->num_fds + num_fresh;

	/*
	 * We check the length of fdes here. It is the last one
	 * enlarged, so if the realloc for poll_fd->fdes fails,
	 * poll_fd->fds will have at least the size of poll_fd->fdes
	 */

	if (num_fds >= talloc_array_length(poll_ev->fdes)) {
		struct pollfd *tmp_fds;
		struct tevent_fd **tmp_fdes;
		unsigned array_length;

		array_length = (num_fds + 15) & ~15; /* round up to 16 */

		tmp_fds = talloc_realloc(
			poll_ev, poll_ev->fds, struct pollfd, array_length);
		if (tmp_fds == NULL) {
			return false;
		}
		poll_ev->fds = tmp_fds;

		tmp_fdes = talloc_realloc(
			poll_ev, poll_ev->fdes, struct tevent_fd *,
			array_length);
		if (tmp_fdes == NULL) {
			return false;
		}
		poll_ev->fdes = tmp_fdes;
	}

	for (fde = poll_ev->fresh; fde; fde = next) {
		struct pollfd *pfd;

		/* append at the tail of both arrays */
		pfd = &poll_ev->fds[poll_ev->num_fds];

		pfd->fd = fde->fd;
		pfd->events = 0;
		pfd->revents = 0;

		if (fde->flags & TEVENT_FD_READ) {
			pfd->events |= (POLLIN|POLLHUP);
		}
		if (fde->flags & TEVENT_FD_WRITE) {
			pfd->events |= (POLLOUT);
		}

		/* remember the slot index for later flag updates/deletes */
		fde->additional_flags = poll_ev->num_fds;
		poll_ev->fdes[poll_ev->num_fds] = fde;

		next = fde->next;
		DLIST_REMOVE(poll_ev->fresh, fde);
		DLIST_ADD(ev->fd_events, fde);

		/* from now on deletion goes via the array destructor */
		talloc_set_destructor(fde, poll_event_fd_destructor);

		poll_ev->num_fds += 1;
	}
	return true;
}
/*
  event loop handling using poll()

  Waits (up to *tvalp) for fd, signal or timer activity and runs at
  most one fd handler per call. Returns 0 on success, -1 on error.
*/
static int poll_event_loop_poll(struct tevent_context *ev,
				struct timeval *tvalp)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);
	int pollrtn;
	int timeout = -1;
	unsigned first_fd;
	unsigned i, next_i;
	int poll_errno;

	if (ev->signal_events && tevent_common_check_signal(ev)) {
		return 0;
	}

	if (tvalp != NULL) {
		/* convert to milliseconds, rounding microseconds up */
		timeout = tvalp->tv_sec * 1000;
		timeout += (tvalp->tv_usec + 999) / 1000;
	}

	poll_event_drain_signal_fd(poll_ev);

	if (!poll_event_setup_fresh(ev, poll_ev)) {
		return -1;
	}

	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
	pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
	/* save errno: the trace callback below may clobber it */
	poll_errno = errno;
	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

	if (pollrtn == -1 && poll_errno == EINTR && ev->signal_events) {
		tevent_common_check_signal(ev);
		return 0;
	}

	if (pollrtn == 0 && tvalp) {
		/* we don't care about a possible delay here */
		tevent_common_loop_timer_delay(ev);
		return 0;
	}

	if (pollrtn <= 0) {
		/*
		 * No fd's ready
		 */
		return 0;
	}

	/* skip slot 0 (the wakeup pipe) in the mt backend */
	first_fd = (poll_ev->signal_fd != -1) ? 1 : 0;

	/* at least one file descriptor is ready - check
	   which ones and call the handler, being careful to allow
	   the handler to remove itself when called */

	/*
	 * next_i (instead of a plain i++) lets us revisit slot 'i'
	 * after a deleted entry was compacted into it, so the fd event
	 * swapped into that slot is not skipped.
	 */
	for (i=first_fd; i<poll_ev->num_fds; i = next_i) {
		struct pollfd *pfd;
		struct tevent_fd *fde;
		uint16_t flags = 0;

		next_i = i + 1;

		fde = poll_ev->fdes[i];
		if (fde == NULL) {
			/*
			 * This fde was talloc_free()'ed. Delete it
			 * from the arrays
			 */
			poll_ev->num_fds -= 1;
			if (poll_ev->num_fds == i) {
				/* it was the last entry; nothing to move */
				break;
			}
			/* move the last entry into the freed slot */
			poll_ev->fds[i] = poll_ev->fds[poll_ev->num_fds];
			poll_ev->fdes[i] = poll_ev->fdes[poll_ev->num_fds];
			if (poll_ev->fdes[i] != NULL) {
				/* keep the fde's stored index in sync */
				poll_ev->fdes[i]->additional_flags = i;
			}
			/* we have to reprocess position 'i' */
			next_i = i;
			continue;
		}

		pfd = &poll_ev->fds[i];

		if (pfd->revents & (POLLHUP|POLLERR)) {
			/* If we only wait for TEVENT_FD_WRITE, we
			   should not tell the event handler about it,
			   and remove the writable flag, as we only
			   report errors when waiting for read events
			   to match the select behavior. */
			if (!(fde->flags & TEVENT_FD_READ)) {
				TEVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLIN) {
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= TEVENT_FD_WRITE;
		}
		if (flags != 0) {
			/*
			 * Run exactly one handler per loop iteration;
			 * the handler may add/remove events, so stop
			 * scanning the (possibly stale) arrays here.
			 */
			fde->handler(ev, fde, flags, fde->private_data);
			break;
		}
	}

	return 0;
}
/*
  do a single event loop using the events defined in ev

  Event classes are checked in fixed priority order: signals first,
  then immediates, then due timers, and only then fd events via
  poll(2) with the delay to the next timer as the timeout.
*/
static int poll_event_loop_once(struct tevent_context *ev,
				const char *location)
{
	struct timeval tval;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return 0;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return 0;
	}

	tval = tevent_common_loop_timer_delay(ev);
	if (tevent_timeval_is_zero(&tval)) {
		/* a timer event was ready and has been run */
		return 0;
	}

	return poll_event_loop_poll(ev, &tval);
}
/*
 * Run the event loop until no events of any kind remain (including
 * fde's still waiting on the "fresh" list). Returns 0 when drained,
 * or the first non-zero error from _tevent_loop_once().
 */
static int poll_event_loop_wait(struct tevent_context *ev,
				const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	/*
	 * loop as long as we have events pending
	 */
	while (ev->fd_events ||
	       ev->timer_events ||
	       ev->immediate_events ||
	       ev->signal_events ||
	       poll_ev->fresh) {
		int ret;
		ret = _tevent_loop_once(ev, location);
		if (ret != 0) {
			tevent_debug(ev, TEVENT_DEBUG_FATAL,
				     "_tevent_loop_once() failed: %d - %s\n",
				     ret, strerror(errno));
			return ret;
		}
	}

	tevent_debug(ev, TEVENT_DEBUG_WARNING,
		     "poll_event_loop_wait() out of events\n");
	return 0;
}
/*
 * Backend ops for the single-threaded "poll" backend. Note it uses
 * the common schedule_immediate (no poll-thread wakeup needed).
 */
static const struct tevent_ops poll_event_ops = {
	.context_init = poll_event_context_init,
	.add_fd = poll_event_add_fd,
	.set_fd_close_fn = tevent_common_fd_set_close_fn,
	.get_fd_flags = tevent_common_fd_get_flags,
	.set_fd_flags = poll_event_set_fd_flags,
	.add_timer = tevent_common_add_timer,
	.schedule_immediate = tevent_common_schedule_immediate,
	.add_signal = tevent_common_add_signal,
	.loop_once = poll_event_loop_once,
	.loop_wait = poll_event_loop_wait,
};
/* Register the single-threaded "poll" backend with tevent. */
_PRIVATE_ bool tevent_poll_init(void)
{
	return tevent_register_backend("poll", &poll_event_ops);
}
/*
 * Backend ops for the multi-thread-aware "poll_mt" backend: differs
 * from poll_event_ops only in context_init (sets up the wakeup pipe)
 * and schedule_immediate (wakes the poll thread).
 */
static const struct tevent_ops poll_event_mt_ops = {
	.context_init = poll_event_context_init_mt,
	.add_fd = poll_event_add_fd,
	.set_fd_close_fn = tevent_common_fd_set_close_fn,
	.get_fd_flags = tevent_common_fd_get_flags,
	.set_fd_flags = poll_event_set_fd_flags,
	.add_timer = tevent_common_add_timer,
	.schedule_immediate = poll_event_schedule_immediate,
	.add_signal = tevent_common_add_signal,
	.loop_once = poll_event_loop_once,
	.loop_wait = poll_event_loop_wait,
};
/* Register the multi-thread-aware "poll_mt" backend with tevent. */
_PRIVATE_ bool tevent_poll_mt_init(void)
{
	return tevent_register_backend("poll_mt", &poll_event_mt_ops);
}