tevent: use tevent_poll_event_add_fd_internal() in poll_event_add_fd()
lib/tevent/tevent_poll.c
blob 6e1c5c39f2d0995d330c06d623df8f73f1fb4df7
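With this change, poll_event_add_fd() no longer duplicates the fresh-list setup: it fills in the new tevent_fd and then defers registration to tevent_poll_event_add_fd_internal(), which puts the fde on poll_ev->fresh with poll_fresh_fde_destructor until poll_event_setup_fresh() transfers it into the pollfd/fdes arrays on the next pass through the event loop.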
/*
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell 2003-2005
   Copyright (C) Stefan Metzmacher 2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_util.h"
#include "tevent_internal.h"
struct poll_event_context {
        /* a pointer back to the generic event_context */
        struct tevent_context *ev;

        /*
         * A DLIST for fresh fde's added by poll_event_add_fd but not
         * picked up yet by poll_event_loop_once
         */
        struct tevent_fd *fresh;
        bool deleted;

        /*
         * These two arrays are maintained together.
         */
        struct pollfd *fds;
        struct tevent_fd **fdes;
        unsigned num_fds;

        /*
         * Signal fd to wake the poll() thread
         */
        int signal_fd;

        /* information for exiting from the event loop */
        int exit_code;
};
static int poll_event_context_destructor(struct poll_event_context *poll_ev)
{
        struct tevent_fd *fd, *fn;

        for (fd = poll_ev->fresh; fd; fd = fn) {
                fn = fd->next;
                fd->event_ctx = NULL;
                DLIST_REMOVE(poll_ev->fresh, fd);
        }

        if (poll_ev->signal_fd == -1) {
                /*
                 * Non-threaded, no signal pipe
                 */
                return 0;
        }

        close(poll_ev->signal_fd);
        poll_ev->signal_fd = -1;

        if (poll_ev->num_fds == 0) {
                return 0;
        }
        if (poll_ev->fds[0].fd != -1) {
                close(poll_ev->fds[0].fd);
                poll_ev->fds[0].fd = -1;
        }
        return 0;
}
/*
  create a poll_event_context structure.
*/
static int poll_event_context_init(struct tevent_context *ev)
{
        struct poll_event_context *poll_ev;

        /*
         * we might be called during tevent_re_initialise()
         * which means we need to free our old additional_data
         * in order to detach old fd events from the
         * poll_ev->fresh list
         */
        TALLOC_FREE(ev->additional_data);

        poll_ev = talloc_zero(ev, struct poll_event_context);
        if (poll_ev == NULL) {
                return -1;
        }
        poll_ev->ev = ev;
        poll_ev->signal_fd = -1;
        ev->additional_data = poll_ev;
        talloc_set_destructor(poll_ev, poll_event_context_destructor);
        return 0;
}
static bool set_nonblock(int fd)
{
        int val;

        val = fcntl(fd, F_GETFL, 0);
        if (val == -1) {
                return false;
        }
        val |= O_NONBLOCK;

        return (fcntl(fd, F_SETFL, val) != -1);
}
static int poll_event_context_init_mt(struct tevent_context *ev)
{
        struct poll_event_context *poll_ev;
        struct pollfd *pfd;
        int fds[2];
        int ret;

        ret = poll_event_context_init(ev);
        if (ret == -1) {
                return ret;
        }

        poll_ev = talloc_get_type_abort(
                ev->additional_data, struct poll_event_context);

        poll_ev->fds = talloc_zero(poll_ev, struct pollfd);
        if (poll_ev->fds == NULL) {
                return -1;
        }

        ret = pipe(fds);
        if (ret == -1) {
                return -1;
        }

        if (!set_nonblock(fds[0]) || !set_nonblock(fds[1])) {
                close(fds[0]);
                close(fds[1]);
                return -1;
        }

        poll_ev->signal_fd = fds[1];

        pfd = &poll_ev->fds[0];
        pfd->fd = fds[0];
        pfd->events = (POLLIN|POLLHUP);

        poll_ev->num_fds = 1;

        talloc_set_destructor(poll_ev, poll_event_context_destructor);

        return 0;
}
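/*
  Wake up a thread that may be blocked in poll(): write a byte to the
  signal pipe set up in poll_event_context_init_mt(). This is a no-op
  for the single-threaded "poll" backend, where signal_fd stays at -1.
*/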
static void poll_event_wake_pollthread(struct poll_event_context *poll_ev)
{
        char c;
        ssize_t ret;

        if (poll_ev->signal_fd == -1) {
                return;
        }
        c = 0;
        do {
                ret = write(poll_ev->signal_fd, &c, sizeof(c));
        } while ((ret == -1) && (errno == EINTR));
}
static void poll_event_drain_signal_fd(struct poll_event_context *poll_ev)
{
        char buf[16];
        ssize_t ret;
        int fd;

        if (poll_ev->signal_fd == -1) {
                return;
        }

        if (poll_ev->num_fds < 1) {
                return;
        }
        fd = poll_ev->fds[0].fd;

        do {
                ret = read(fd, buf, sizeof(buf));
        } while (ret == sizeof(buf));
}
/*
  destroy an fd_event
*/
static int poll_event_fd_destructor(struct tevent_fd *fde)
{
        struct tevent_context *ev = fde->event_ctx;
        struct poll_event_context *poll_ev;
        uint64_t del_idx = fde->additional_flags;

        if (ev == NULL) {
                goto done;
        }

        poll_ev = talloc_get_type_abort(
                ev->additional_data, struct poll_event_context);

        poll_ev->fdes[del_idx] = NULL;
        poll_ev->deleted = true;
        poll_event_wake_pollthread(poll_ev);
done:
        return tevent_common_fd_destructor(fde);
}
static int poll_fresh_fde_destructor(struct tevent_fd *fde)
{
        struct tevent_context *ev = fde->event_ctx;
        struct poll_event_context *poll_ev;

        if (ev == NULL) {
                goto done;
        }
        poll_ev = talloc_get_type_abort(
                ev->additional_data, struct poll_event_context);

        DLIST_REMOVE(poll_ev->fresh, fde);
done:
        return tevent_common_fd_destructor(fde);
}
static void poll_event_schedule_immediate(struct tevent_immediate *im,
                                          struct tevent_context *ev,
                                          tevent_immediate_handler_t handler,
                                          void *private_data,
                                          const char *handler_name,
                                          const char *location)
{
        struct poll_event_context *poll_ev = talloc_get_type_abort(
                ev->additional_data, struct poll_event_context);

        tevent_common_schedule_immediate(im, ev, handler, private_data,
                                         handler_name, location);
        poll_event_wake_pollthread(poll_ev);
}
/*
  Private function called by "standard" backend fallback.
  Note this only allows fallback to "poll" backend, not "poll-mt".
*/
_PRIVATE_ void tevent_poll_event_add_fd_internal(struct tevent_context *ev,
                                                 struct tevent_fd *fde)
{
        struct poll_event_context *poll_ev = talloc_get_type_abort(
                ev->additional_data, struct poll_event_context);

        fde->additional_flags = UINT64_MAX;
        fde->additional_data = NULL;
        DLIST_ADD(poll_ev->fresh, fde);
        talloc_set_destructor(fde, poll_fresh_fde_destructor);
}
/*
  add a fd based event
  return NULL on failure (memory allocation error)
*/
static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev,
                                           TALLOC_CTX *mem_ctx,
                                           int fd, uint16_t flags,
                                           tevent_fd_handler_t handler,
                                           void *private_data,
                                           const char *handler_name,
                                           const char *location)
{
        struct poll_event_context *poll_ev = talloc_get_type_abort(
                ev->additional_data, struct poll_event_context);
        struct tevent_fd *fde;

        if (fd < 0) {
                return NULL;
        }

        fde = talloc(mem_ctx ? mem_ctx : ev, struct tevent_fd);
        if (fde == NULL) {
                return NULL;
        }
        fde->event_ctx = ev;
        fde->fd = fd;
        fde->flags = flags;
        fde->handler = handler;
        fde->close_fn = NULL;
        fde->private_data = private_data;
        fde->handler_name = handler_name;
        fde->location = location;
        fde->additional_flags = UINT64_MAX;
        fde->additional_data = NULL;

        tevent_poll_event_add_fd_internal(ev, fde);
        poll_event_wake_pollthread(poll_ev);

        /*
         * poll_event_loop_poll will take care of the rest in
         * poll_event_setup_fresh
         */
        return fde;
}
/*
  set the fd event flags
*/
static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
        struct tevent_context *ev = fde->event_ctx;
        struct poll_event_context *poll_ev;
        uint64_t idx = fde->additional_flags;
        uint16_t pollflags;

        if (ev == NULL) {
                return;
        }
        poll_ev = talloc_get_type_abort(
                ev->additional_data, struct poll_event_context);

        fde->flags = flags;

        if (idx == UINT64_MAX) {
                /*
                 * poll_event_setup_fresh not yet called after this fde was
                 * added. We don't have to do anything to transfer the changed
                 * flags to the array passed to poll(2)
                 */
                return;
        }

        pollflags = 0;

        if (flags & TEVENT_FD_READ) {
                pollflags |= (POLLIN|POLLHUP);
        }
        if (flags & TEVENT_FD_WRITE) {
                pollflags |= (POLLOUT);
        }
        poll_ev->fds[idx].events = pollflags;

        poll_event_wake_pollthread(poll_ev);
}
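/*
  Bring the pollfd/fdes arrays up to date before calling poll():
  compact away entries whose fde was talloc_free()'ed and append the
  fde's collected on the "fresh" list since the last run.
*/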
static bool poll_event_setup_fresh(struct tevent_context *ev,
                                   struct poll_event_context *poll_ev)
{
        struct tevent_fd *fde, *next;
        unsigned num_fresh, num_fds;

        if (poll_ev->deleted) {
                unsigned first_fd = (poll_ev->signal_fd != -1) ? 1 : 0;
                unsigned i;

                for (i=first_fd; i < poll_ev->num_fds;) {
                        fde = poll_ev->fdes[i];
                        if (fde != NULL) {
                                i++;
                                continue;
                        }

                        /*
                         * This fde was talloc_free()'ed. Delete it
                         * from the arrays
                         */
                        poll_ev->num_fds -= 1;
                        if (poll_ev->num_fds == i) {
                                break;
                        }
                        poll_ev->fds[i] = poll_ev->fds[poll_ev->num_fds];
                        poll_ev->fdes[i] = poll_ev->fdes[poll_ev->num_fds];
                        if (poll_ev->fdes[i] != NULL) {
                                poll_ev->fdes[i]->additional_flags = i;
                        }
                }
        }
        poll_ev->deleted = false;

        if (poll_ev->fresh == NULL) {
                return true;
        }

        num_fresh = 0;
        for (fde = poll_ev->fresh; fde; fde = fde->next) {
                num_fresh += 1;
        }
        num_fds = poll_ev->num_fds + num_fresh;

        /*
         * We check the length of fdes here. It is the last one
         * enlarged, so if the realloc for poll_fd->fdes fails,
         * poll_fd->fds will have at least the size of poll_fd->fdes
         */

        if (num_fds >= talloc_array_length(poll_ev->fdes)) {
                struct pollfd *tmp_fds;
                struct tevent_fd **tmp_fdes;
                unsigned array_length;

                array_length = (num_fds + 15) & ~15; /* round up to 16 */

                tmp_fds = talloc_realloc(
                        poll_ev, poll_ev->fds, struct pollfd, array_length);
                if (tmp_fds == NULL) {
                        return false;
                }
                poll_ev->fds = tmp_fds;

                tmp_fdes = talloc_realloc(
                        poll_ev, poll_ev->fdes, struct tevent_fd *,
                        array_length);
                if (tmp_fdes == NULL) {
                        return false;
                }
                poll_ev->fdes = tmp_fdes;
        }

        for (fde = poll_ev->fresh; fde; fde = next) {
                struct pollfd *pfd;

                pfd = &poll_ev->fds[poll_ev->num_fds];

                pfd->fd = fde->fd;
                pfd->events = 0;
                pfd->revents = 0;

                if (fde->flags & TEVENT_FD_READ) {
                        pfd->events |= (POLLIN|POLLHUP);
                }
                if (fde->flags & TEVENT_FD_WRITE) {
                        pfd->events |= (POLLOUT);
                }

                fde->additional_flags = poll_ev->num_fds;
                poll_ev->fdes[poll_ev->num_fds] = fde;

                next = fde->next;
                DLIST_REMOVE(poll_ev->fresh, fde);
                DLIST_ADD(ev->fd_events, fde);

                talloc_set_destructor(fde, poll_event_fd_destructor);

                poll_ev->num_fds += 1;
        }
        return true;
}
/*
  event loop handling using poll()
*/
static int poll_event_loop_poll(struct tevent_context *ev,
                                struct timeval *tvalp)
{
        struct poll_event_context *poll_ev = talloc_get_type_abort(
                ev->additional_data, struct poll_event_context);
        int pollrtn;
        int timeout = -1;
        unsigned first_fd;
        unsigned i, next_i;
        int poll_errno;

        if (ev->signal_events && tevent_common_check_signal(ev)) {
                return 0;
        }

        if (tvalp != NULL) {
                timeout = tvalp->tv_sec * 1000;
                timeout += (tvalp->tv_usec + 999) / 1000;
        }

        poll_event_drain_signal_fd(poll_ev);

        if (!poll_event_setup_fresh(ev, poll_ev)) {
                return -1;
        }

        tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
        pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
        poll_errno = errno;
        tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

        if (pollrtn == -1 && poll_errno == EINTR && ev->signal_events) {
                tevent_common_check_signal(ev);
                return 0;
        }

        if (pollrtn == 0 && tvalp) {
                /* we don't care about a possible delay here */
                tevent_common_loop_timer_delay(ev);
                return 0;
        }

        if (pollrtn <= 0) {
                /*
                 * No fd's ready
                 */
                return 0;
        }

        first_fd = (poll_ev->signal_fd != -1) ? 1 : 0;

        /* at least one file descriptor is ready - check
           which ones and call the handler, being careful to allow
           the handler to remove itself when called */

        for (i=first_fd; i<poll_ev->num_fds; i = next_i) {
                struct pollfd *pfd;
                struct tevent_fd *fde;
                uint16_t flags = 0;

                next_i = i + 1;

                fde = poll_ev->fdes[i];
                if (fde == NULL) {
                        /*
                         * This fde was talloc_free()'ed. Delete it
                         * from the arrays
                         */
                        poll_ev->num_fds -= 1;
                        if (poll_ev->num_fds == i) {
                                break;
                        }
                        poll_ev->fds[i] = poll_ev->fds[poll_ev->num_fds];
                        poll_ev->fdes[i] = poll_ev->fdes[poll_ev->num_fds];
                        if (poll_ev->fdes[i] != NULL) {
                                poll_ev->fdes[i]->additional_flags = i;
                        }
                        /* we have to reprocess position 'i' */
                        next_i = i;
                        continue;
                }

                pfd = &poll_ev->fds[i];

                if (pfd->revents & (POLLHUP|POLLERR)) {
                        /* If we only wait for TEVENT_FD_WRITE, we
                           should not tell the event handler about it,
                           and remove the writable flag, as we only
                           report errors when waiting for read events
                           to match the select behavior. */
                        if (!(fde->flags & TEVENT_FD_READ)) {
                                TEVENT_FD_NOT_WRITEABLE(fde);
                                continue;
                        }
                        flags |= TEVENT_FD_READ;
                }
                if (pfd->revents & POLLIN) {
                        flags |= TEVENT_FD_READ;
                }
                if (pfd->revents & POLLOUT) {
                        flags |= TEVENT_FD_WRITE;
                }
                if (flags != 0) {
                        fde->handler(ev, fde, flags, fde->private_data);
                        break;
                }
        }

        return 0;
}
/*
  do a single event loop using the events defined in ev
*/
static int poll_event_loop_once(struct tevent_context *ev,
                                const char *location)
{
        struct timeval tval;

        if (ev->signal_events &&
            tevent_common_check_signal(ev)) {
                return 0;
        }

        if (ev->immediate_events &&
            tevent_common_loop_immediate(ev)) {
                return 0;
        }

        tval = tevent_common_loop_timer_delay(ev);
        if (tevent_timeval_is_zero(&tval)) {
                return 0;
        }

        return poll_event_loop_poll(ev, &tval);
}
static int poll_event_loop_wait(struct tevent_context *ev,
                                const char *location)
{
        struct poll_event_context *poll_ev = talloc_get_type_abort(
                ev->additional_data, struct poll_event_context);

        /*
         * loop as long as we have events pending
         */
        while (ev->fd_events ||
               ev->timer_events ||
               ev->immediate_events ||
               ev->signal_events ||
               poll_ev->fresh) {
                int ret;
                ret = _tevent_loop_once(ev, location);
                if (ret != 0) {
                        tevent_debug(ev, TEVENT_DEBUG_FATAL,
                                     "_tevent_loop_once() failed: %d - %s\n",
                                     ret, strerror(errno));
                        return ret;
                }
        }

        tevent_debug(ev, TEVENT_DEBUG_WARNING,
                     "poll_event_loop_wait() out of events\n");
        return 0;
}
static const struct tevent_ops poll_event_ops = {
        .context_init           = poll_event_context_init,
        .add_fd                 = poll_event_add_fd,
        .set_fd_close_fn        = tevent_common_fd_set_close_fn,
        .get_fd_flags           = tevent_common_fd_get_flags,
        .set_fd_flags           = poll_event_set_fd_flags,
        .add_timer              = tevent_common_add_timer,
        .schedule_immediate     = tevent_common_schedule_immediate,
        .add_signal             = tevent_common_add_signal,
        .loop_once              = poll_event_loop_once,
        .loop_wait              = poll_event_loop_wait,
};

_PRIVATE_ bool tevent_poll_init(void)
{
        return tevent_register_backend("poll", &poll_event_ops);
}
static const struct tevent_ops poll_event_mt_ops = {
        .context_init           = poll_event_context_init_mt,
        .add_fd                 = poll_event_add_fd,
        .set_fd_close_fn        = tevent_common_fd_set_close_fn,
        .get_fd_flags           = tevent_common_fd_get_flags,
        .set_fd_flags           = poll_event_set_fd_flags,
        .add_timer              = tevent_common_add_timer,
        .schedule_immediate     = poll_event_schedule_immediate,
        .add_signal             = tevent_common_add_signal,
        .loop_once              = poll_event_loop_once,
        .loop_wait              = poll_event_loop_wait,
};

_PRIVATE_ bool tevent_poll_mt_init(void)
{
        return tevent_register_backend("poll_mt", &poll_event_mt_ops);
}
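For context, a minimal sketch (not part of the file) of how an application might select and drive this backend through the public tevent API. tevent_context_init_byname(), tevent_add_fd(), TEVENT_FD_READ and tevent_loop_wait() are the public tevent/talloc entry points; the stdin handler and program structure are purely illustrative assumptions.

#include <stdio.h>
#include <unistd.h>
#include <talloc.h>
#include <tevent.h>

static void stdin_handler(struct tevent_context *ev, struct tevent_fd *fde,
                          uint16_t flags, void *private_data)
{
        char buf[256];
        ssize_t n = read(0, buf, sizeof(buf));

        if (n <= 0) {
                /* EOF or error: drop the fde so the loop can run out of events */
                talloc_free(fde);
                return;
        }
        printf("read %zd bytes from stdin\n", n);
}

int main(void)
{
        TALLOC_CTX *mem_ctx = talloc_new(NULL);
        struct tevent_context *ev;
        struct tevent_fd *fde;

        /* pick the poll backend by name instead of the platform default */
        ev = tevent_context_init_byname(mem_ctx, "poll");
        if (ev == NULL) {
                return 1;
        }

        /* the new fde starts on poll_ev->fresh and is moved into the
           pollfd array by poll_event_setup_fresh() on the first loop run */
        fde = tevent_add_fd(ev, ev, 0, TEVENT_FD_READ, stdin_handler, NULL);
        if (fde == NULL) {
                return 1;
        }

        /* ends up in poll_event_loop_wait(), looping until no events remain */
        return tevent_loop_wait(ev);
}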