tevent: make sure we cleanup the array passed to poll() after deleting an event
[Samba/gebeck_regimport.git] / lib/tevent/tevent_poll.c
blob 0928cbd6cb07f46cebb76ac995da9897c93f91ad
/*
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell 2003-2005
   Copyright (C) Stefan Metzmacher 2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_util.h"
#include "tevent_internal.h"

struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;
	/*
	 * Set when an fde in the arrays below was talloc_free()'ed;
	 * tells poll_event_setup_fresh to compact the arrays.
	 */
	bool deleted;

	/*
	 * These two arrays are maintained together.
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread
	 */
	int signal_fd;

	/* information for exiting from the event loop */
	int exit_code;
};

static int poll_event_context_destructor(struct poll_event_context *poll_ev)
{
	struct tevent_fd *fd, *fn;

	for (fd = poll_ev->fresh; fd; fd = fn) {
		fn = fd->next;
		fd->event_ctx = NULL;
		DLIST_REMOVE(poll_ev->fresh, fd);
	}

	if (poll_ev->signal_fd == -1) {
		/*
		 * Non-threaded, no signal pipe
		 */
		return 0;
	}

	close(poll_ev->signal_fd);
	poll_ev->signal_fd = -1;

	if (poll_ev->num_fds == 0) {
		return 0;
	}
	if (poll_ev->fds[0].fd != -1) {
		close(poll_ev->fds[0].fd);
		poll_ev->fds[0].fd = -1;
	}
	return 0;
}

/*
  create a poll_event_context structure.
*/
static int poll_event_context_init(struct tevent_context *ev)
{
	struct poll_event_context *poll_ev;

	/*
	 * we might be called during tevent_re_initialise()
	 * which means we need to free our old additional_data
	 * in order to detach old fd events from the
	 * poll_ev->fresh list
	 */
	TALLOC_FREE(ev->additional_data);

	poll_ev = talloc_zero(ev, struct poll_event_context);
	if (poll_ev == NULL) {
		return -1;
	}
	poll_ev->ev = ev;
	poll_ev->signal_fd = -1;
	ev->additional_data = poll_ev;
	talloc_set_destructor(poll_ev, poll_event_context_destructor);
	return 0;
}

static bool set_nonblock(int fd)
{
	int val;

	val = fcntl(fd, F_GETFL, 0);
	if (val == -1) {
		return false;
	}
	val |= O_NONBLOCK;

	return (fcntl(fd, F_SETFL, val) != -1);
}

static int poll_event_context_init_mt(struct tevent_context *ev)
{
	struct poll_event_context *poll_ev;
	struct pollfd *pfd;
	int fds[2];
	int ret;

	ret = poll_event_context_init(ev);
	if (ret == -1) {
		return ret;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	poll_ev->fds = talloc_zero(poll_ev, struct pollfd);
	if (poll_ev->fds == NULL) {
		return -1;
	}

	ret = pipe(fds);
	if (ret == -1) {
		return -1;
	}

	if (!set_nonblock(fds[0]) || !set_nonblock(fds[1])) {
		close(fds[0]);
		close(fds[1]);
		return -1;
	}

	poll_ev->signal_fd = fds[1];

	pfd = &poll_ev->fds[0];
	pfd->fd = fds[0];
	pfd->events = (POLLIN|POLLHUP);

	poll_ev->num_fds = 1;

	talloc_set_destructor(poll_ev, poll_event_context_destructor);

	return 0;
}

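/*
 * Descriptive note (not in the original file): the "poll_mt" variant above
 * reserves slot 0 of poll_ev->fds for the read end of a non-blocking pipe,
 * while the write end is kept in signal_fd. poll_event_wake_pollthread()
 * below writes a single byte to signal_fd so that a poll() blocked in
 * another thread returns and picks up newly scheduled work;
 * poll_event_drain_signal_fd() empties the pipe before the next poll() call.
 */
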
static void poll_event_wake_pollthread(struct poll_event_context *poll_ev)
{
	char c;
	ssize_t ret;

	if (poll_ev->signal_fd == -1) {
		return;
	}
	c = 0;
	do {
		ret = write(poll_ev->signal_fd, &c, sizeof(c));
	} while ((ret == -1) && (errno == EINTR));
}

static void poll_event_drain_signal_fd(struct poll_event_context *poll_ev)
{
	char buf[16];
	ssize_t ret;
	int fd;

	if (poll_ev->signal_fd == -1) {
		return;
	}

	if (poll_ev->num_fds < 1) {
		return;
	}
	fd = poll_ev->fds[0].fd;

	do {
		ret = read(fd, buf, sizeof(buf));
	} while (ret == sizeof(buf));
}

/*
  destroy an fd_event
*/
static int poll_event_fd_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;
	uint64_t del_idx = fde->additional_flags;

	if (ev == NULL) {
		goto done;
	}

	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	poll_ev->fdes[del_idx] = NULL;
	poll_ev->deleted = true;
	poll_event_wake_pollthread(poll_ev);
done:
	return tevent_common_fd_destructor(fde);
}

static int poll_fresh_fde_destructor(struct tevent_fd *fde)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;

	if (ev == NULL) {
		goto done;
	}
	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	DLIST_REMOVE(poll_ev->fresh, fde);
done:
	return tevent_common_fd_destructor(fde);
}

static void poll_event_schedule_immediate(struct tevent_immediate *im,
					  struct tevent_context *ev,
					  tevent_immediate_handler_t handler,
					  void *private_data,
					  const char *handler_name,
					  const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	tevent_common_schedule_immediate(im, ev, handler, private_data,
					 handler_name, location);
	poll_event_wake_pollthread(poll_ev);
}

/*
  Private function called by "standard" backend fallback.
  Note this only allows fallback to "poll" backend, not "poll-mt".
*/
_PRIVATE_ void tevent_poll_event_add_fd_internal(struct tevent_context *ev,
						 struct tevent_fd *fde)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	fde->additional_flags = UINT64_MAX;
	fde->additional_data = NULL;
	DLIST_ADD(poll_ev->fresh, fde);
	talloc_set_destructor(fde, poll_fresh_fde_destructor);
}

/*
  add a fd based event
  return NULL on failure (memory allocation error)
*/
static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev,
					   TALLOC_CTX *mem_ctx,
					   int fd, uint16_t flags,
					   tevent_fd_handler_t handler,
					   void *private_data,
					   const char *handler_name,
					   const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);
	struct tevent_fd *fde;

	if (fd < 0) {
		return NULL;
	}

	fde = talloc(mem_ctx ? mem_ctx : ev, struct tevent_fd);
	if (fde == NULL) {
		return NULL;
	}
	fde->event_ctx = ev;
	fde->fd = fd;
	fde->flags = flags;
	fde->handler = handler;
	fde->close_fn = NULL;
	fde->private_data = private_data;
	fde->handler_name = handler_name;
	fde->location = location;
	fde->additional_flags = UINT64_MAX;
	fde->additional_data = NULL;

	DLIST_ADD(poll_ev->fresh, fde);
	talloc_set_destructor(fde, poll_fresh_fde_destructor);
	poll_event_wake_pollthread(poll_ev);

	/*
	 * poll_event_loop_poll will take care of the rest in
	 * poll_event_setup_fresh
	 */
	return fde;
}

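/*
 * Illustrative sketch (not part of this backend): application code reaches
 * poll_event_add_fd through the public tevent_add_fd() wrapper. The handler
 * name and socket fd below are made-up names for the example.
 *
 *	static void echo_handler(struct tevent_context *ev,
 *				 struct tevent_fd *fde,
 *				 uint16_t flags, void *private_data)
 *	{
 *		if (flags & TEVENT_FD_READ) {
 *			... read from the socket ...
 *		}
 *	}
 *
 *	fde = tevent_add_fd(ev, ev, sock_fd, TEVENT_FD_READ,
 *			    echo_handler, NULL);
 *	if (fde == NULL) {
 *		... handle the allocation failure ...
 *	}
 */
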
/*
  set the fd event flags
*/
static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
	struct tevent_context *ev = fde->event_ctx;
	struct poll_event_context *poll_ev;
	uint64_t idx = fde->additional_flags;
	uint16_t pollflags;

	if (ev == NULL) {
		return;
	}
	poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	fde->flags = flags;

	if (idx == UINT64_MAX) {
		/*
		 * poll_event_setup_fresh not yet called after this fde was
		 * added. We don't have to do anything to transfer the changed
		 * flags to the array passed to poll(2)
		 */
		return;
	}

	pollflags = 0;

	if (flags & TEVENT_FD_READ) {
		pollflags |= (POLLIN|POLLHUP);
	}
	if (flags & TEVENT_FD_WRITE) {
		pollflags |= (POLLOUT);
	}
	poll_ev->fds[idx].events = pollflags;

	poll_event_wake_pollthread(poll_ev);
}

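/*
 * Illustrative sketch (not part of this backend): callers normally reach
 * poll_event_set_fd_flags via the public tevent_fd_set_flags() call or the
 * TEVENT_FD_READABLE()/TEVENT_FD_WRITEABLE() convenience macros, e.g. to
 * start and stop waiting for writability as a send queue fills and drains:
 *
 *	TEVENT_FD_WRITEABLE(fde);
 *	...
 *	TEVENT_FD_NOT_WRITEABLE(fde);
 */
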
static bool poll_event_setup_fresh(struct tevent_context *ev,
				   struct poll_event_context *poll_ev)
{
	struct tevent_fd *fde, *next;
	unsigned num_fresh, num_fds;

	if (poll_ev->deleted) {
		unsigned first_fd = (poll_ev->signal_fd != -1) ? 1 : 0;
		unsigned i;

		for (i=first_fd; i < poll_ev->num_fds;) {
			fde = poll_ev->fdes[i];
			if (fde != NULL) {
				i++;
				continue;
			}

			/*
			 * This fde was talloc_free()'ed. Delete it
			 * from the arrays
			 */
			poll_ev->num_fds -= 1;
			if (poll_ev->num_fds == i) {
				break;
			}
			poll_ev->fds[i] = poll_ev->fds[poll_ev->num_fds];
			poll_ev->fdes[i] = poll_ev->fdes[poll_ev->num_fds];
			if (poll_ev->fdes[i] != NULL) {
				poll_ev->fdes[i]->additional_flags = i;
			}
		}
	}
	poll_ev->deleted = false;

	if (poll_ev->fresh == NULL) {
		return true;
	}

	num_fresh = 0;
	for (fde = poll_ev->fresh; fde; fde = fde->next) {
		num_fresh += 1;
	}
	num_fds = poll_ev->num_fds + num_fresh;

	/*
	 * We check the length of fdes here. It is the last one
	 * enlarged, so if the realloc for poll_ev->fdes fails,
	 * poll_ev->fds will have at least the size of poll_ev->fdes
	 */

	if (num_fds >= talloc_array_length(poll_ev->fdes)) {
		struct pollfd *tmp_fds;
		struct tevent_fd **tmp_fdes;
		unsigned array_length;

		array_length = (num_fds + 15) & ~15; /* round up to 16 */

		tmp_fds = talloc_realloc(
			poll_ev, poll_ev->fds, struct pollfd, array_length);
		if (tmp_fds == NULL) {
			return false;
		}
		poll_ev->fds = tmp_fds;

		tmp_fdes = talloc_realloc(
			poll_ev, poll_ev->fdes, struct tevent_fd *,
			array_length);
		if (tmp_fdes == NULL) {
			return false;
		}
		poll_ev->fdes = tmp_fdes;
	}

	for (fde = poll_ev->fresh; fde; fde = next) {
		struct pollfd *pfd;

		pfd = &poll_ev->fds[poll_ev->num_fds];

		pfd->fd = fde->fd;
		pfd->events = 0;
		pfd->revents = 0;

		if (fde->flags & TEVENT_FD_READ) {
			pfd->events |= (POLLIN|POLLHUP);
		}
		if (fde->flags & TEVENT_FD_WRITE) {
			pfd->events |= (POLLOUT);
		}

		fde->additional_flags = poll_ev->num_fds;
		poll_ev->fdes[poll_ev->num_fds] = fde;

		next = fde->next;
		DLIST_REMOVE(poll_ev->fresh, fde);
		DLIST_ADD(ev->fd_events, fde);

		talloc_set_destructor(fde, poll_event_fd_destructor);

		poll_ev->num_fds += 1;
	}
	return true;
}

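/*
 * Illustrative note (not in the original file): the compaction above uses a
 * swap-with-last scheme. With num_fds == 4 and slot 1 freed, slot 3 is
 * copied into slot 1, num_fds drops to 3, and the moved fde's
 * additional_flags index is rewritten to 1; position 1 is then re-examined
 * in case the moved entry was itself deleted.
 */
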
/*
  event loop handling using poll()
*/
static int poll_event_loop_poll(struct tevent_context *ev,
				struct timeval *tvalp)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);
	int pollrtn;
	int timeout = -1;
	unsigned first_fd;
	unsigned i, next_i;
	int poll_errno;

	if (ev->signal_events && tevent_common_check_signal(ev)) {
		return 0;
	}

	if (tvalp != NULL) {
		timeout = tvalp->tv_sec * 1000;
		timeout += (tvalp->tv_usec + 999) / 1000;
	}

	poll_event_drain_signal_fd(poll_ev);

	if (!poll_event_setup_fresh(ev, poll_ev)) {
		return -1;
	}

	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
	pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
	poll_errno = errno;
	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

	if (pollrtn == -1 && poll_errno == EINTR && ev->signal_events) {
		tevent_common_check_signal(ev);
		return 0;
	}

	if (pollrtn == 0 && tvalp) {
		/* we don't care about a possible delay here */
		tevent_common_loop_timer_delay(ev);
		return 0;
	}

	if (pollrtn <= 0) {
		/*
		 * No fd's ready
		 */
		return 0;
	}

	first_fd = (poll_ev->signal_fd != -1) ? 1 : 0;

	/* at least one file descriptor is ready - check
	   which ones and call the handler, being careful to allow
	   the handler to remove itself when called */

	for (i=first_fd; i<poll_ev->num_fds; i = next_i) {
		struct pollfd *pfd;
		struct tevent_fd *fde;
		uint16_t flags = 0;

		next_i = i + 1;

		fde = poll_ev->fdes[i];
		if (fde == NULL) {
			/*
			 * This fde was talloc_free()'ed. Delete it
			 * from the arrays
			 */
			poll_ev->num_fds -= 1;
			if (poll_ev->num_fds == i) {
				break;
			}
			poll_ev->fds[i] = poll_ev->fds[poll_ev->num_fds];
			poll_ev->fdes[i] = poll_ev->fdes[poll_ev->num_fds];
			if (poll_ev->fdes[i] != NULL) {
				poll_ev->fdes[i]->additional_flags = i;
			}
			/* we have to reprocess position 'i' */
			next_i = i;
			continue;
		}

		pfd = &poll_ev->fds[i];

		if (pfd->revents & (POLLHUP|POLLERR)) {
			/* If we only wait for TEVENT_FD_WRITE, we
			   should not tell the event handler about it,
			   and remove the writable flag, as we only
			   report errors when waiting for read events
			   to match the select behavior. */
			if (!(fde->flags & TEVENT_FD_READ)) {
				TEVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLIN) {
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= TEVENT_FD_WRITE;
		}
		if (flags != 0) {
			fde->handler(ev, fde, flags, fde->private_data);
			break;
		}
	}

	return 0;
}

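/*
 * Illustrative note (not in the original file): the timeout conversion above
 * rounds microseconds up, so a remaining delay of { .tv_sec = 0,
 * .tv_usec = 1500 } becomes timeout = 0 * 1000 + (1500 + 999) / 1000 = 2 ms,
 * rather than truncating to 1 ms and waking up before the timer has expired.
 */
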
/*
  do a single event loop using the events defined in ev
*/
static int poll_event_loop_once(struct tevent_context *ev,
				const char *location)
{
	struct timeval tval;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return 0;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return 0;
	}

	tval = tevent_common_loop_timer_delay(ev);
	if (tevent_timeval_is_zero(&tval)) {
		return 0;
	}

	return poll_event_loop_poll(ev, &tval);
}

static int poll_event_loop_wait(struct tevent_context *ev,
				const char *location)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);

	/*
	 * loop as long as we have events pending
	 */
	while (ev->fd_events ||
	       ev->timer_events ||
	       ev->immediate_events ||
	       ev->signal_events ||
	       poll_ev->fresh) {
		int ret;
		ret = _tevent_loop_once(ev, location);
		if (ret != 0) {
			tevent_debug(ev, TEVENT_DEBUG_FATAL,
				     "_tevent_loop_once() failed: %d - %s\n",
				     ret, strerror(errno));
			return ret;
		}
	}

	tevent_debug(ev, TEVENT_DEBUG_WARNING,
		     "poll_event_loop_wait() out of events\n");
	return 0;
}

static const struct tevent_ops poll_event_ops = {
	.context_init = poll_event_context_init,
	.add_fd = poll_event_add_fd,
	.set_fd_close_fn = tevent_common_fd_set_close_fn,
	.get_fd_flags = tevent_common_fd_get_flags,
	.set_fd_flags = poll_event_set_fd_flags,
	.add_timer = tevent_common_add_timer,
	.schedule_immediate = tevent_common_schedule_immediate,
	.add_signal = tevent_common_add_signal,
	.loop_once = poll_event_loop_once,
	.loop_wait = poll_event_loop_wait,
};

_PRIVATE_ bool tevent_poll_init(void)
{
	return tevent_register_backend("poll", &poll_event_ops);
}

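/*
 * Illustrative sketch (not part of this file): an application can pick this
 * backend explicitly instead of the platform default, either globally
 *
 *	tevent_set_default_backend("poll");
 *	ev = tevent_context_init(mem_ctx);
 *
 * or per context:
 *
 *	ev = tevent_context_init_byname(mem_ctx, "poll");
 */
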
static const struct tevent_ops poll_event_mt_ops = {
	.context_init = poll_event_context_init_mt,
	.add_fd = poll_event_add_fd,
	.set_fd_close_fn = tevent_common_fd_set_close_fn,
	.get_fd_flags = tevent_common_fd_get_flags,
	.set_fd_flags = poll_event_set_fd_flags,
	.add_timer = tevent_common_add_timer,
	.schedule_immediate = poll_event_schedule_immediate,
	.add_signal = tevent_common_add_signal,
	.loop_once = poll_event_loop_once,
	.loop_wait = poll_event_loop_wait,
};

_PRIVATE_ bool tevent_poll_mt_init(void)
{
	return tevent_register_backend("poll_mt", &poll_event_mt_ops);
}
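
/*
 * Illustrative end-to-end sketch (not part of this file). It only uses
 * public tevent calls; make_listen_socket() and on_readable() are made-up
 * names for the example.
 *
 *	TALLOC_CTX *mem_ctx = talloc_new(NULL);
 *	struct tevent_context *ev;
 *	struct tevent_fd *fde;
 *	int sock;
 *
 *	ev = tevent_context_init_byname(mem_ctx, "poll_mt");
 *	if (ev == NULL) {
 *		... "poll_mt" not registered or out of memory ...
 *	}
 *
 *	sock = make_listen_socket();
 *	fde = tevent_add_fd(ev, ev, sock, TEVENT_FD_READ,
 *			    on_readable, NULL);
 *	tevent_fd_set_auto_close(fde);
 *
 *	tevent_loop_wait(ev);
 *	talloc_free(mem_ctx);
 */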