/* tevent: release 0.16.1 — Samba lib/tevent/tevent_poll.c (poll backend) */
/*
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell 2003-2005
   Copyright (C) Stefan Metzmacher 2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
25 #include "replace.h"
26 #include "system/filesys.h"
27 #include "system/select.h"
28 #include "tevent.h"
29 #include "tevent_util.h"
30 #include "tevent_internal.h"
/* per-context private state for the poll backend */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * one or more events were deleted or disabled
	 */
	bool deleted;

	/*
	 * These two arrays are maintained together.
	 *
	 * The following is always true:
	 * num_fds <= num_fdes
	 *
	 * new 'fresh' elements are added at the end
	 * of the 'fdes' array and picked up later
	 * to the 'fds' array in poll_event_sync_arrays()
	 * before the poll() syscall.
	 */
	struct pollfd *fds;
	size_t num_fds;
	struct tevent_fd **fdes;
	size_t num_fdes;

	/*
	 * use tevent_common_wakeup(ev) to wake the poll() thread
	 */
	bool use_mt_mode;
};
64 create a poll_event_context structure.
66 static int poll_event_context_init(struct tevent_context *ev)
68 struct poll_event_context *poll_ev;
71 * we might be called during tevent_re_initialise()
72 * which means we need to free our old additional_data
73 * in order to detach old fd events from the
74 * poll_ev->fresh list
76 TALLOC_FREE(ev->additional_data);
78 poll_ev = talloc_zero(ev, struct poll_event_context);
79 if (poll_ev == NULL) {
80 return -1;
82 poll_ev->ev = ev;
83 ev->additional_data = poll_ev;
84 return 0;
87 static int poll_event_context_init_mt(struct tevent_context *ev)
89 struct poll_event_context *poll_ev;
90 int ret;
92 ret = poll_event_context_init(ev);
93 if (ret == -1) {
94 return ret;
97 poll_ev = talloc_get_type_abort(
98 ev->additional_data, struct poll_event_context);
100 ret = tevent_common_wakeup_init(ev);
101 if (ret != 0) {
102 return ret;
105 poll_ev->use_mt_mode = true;
107 return 0;
110 static void poll_event_wake_pollthread(struct poll_event_context *poll_ev)
112 if (!poll_ev->use_mt_mode) {
113 return;
115 tevent_common_wakeup(poll_ev->ev);
119 destroy an fd_event
121 static int poll_event_fd_destructor(struct tevent_fd *fde)
123 struct tevent_context *ev = fde->event_ctx;
124 struct poll_event_context *poll_ev;
125 uint64_t del_idx = fde->additional_flags;
127 if (ev == NULL) {
128 goto done;
131 poll_ev = talloc_get_type_abort(
132 ev->additional_data, struct poll_event_context);
134 if (del_idx == UINT64_MAX) {
135 goto done;
138 poll_ev->fdes[del_idx] = NULL;
139 poll_ev->deleted = true;
140 poll_event_wake_pollthread(poll_ev);
141 done:
142 return tevent_common_fd_destructor(fde);
145 static void poll_event_schedule_immediate(struct tevent_immediate *im,
146 struct tevent_context *ev,
147 tevent_immediate_handler_t handler,
148 void *private_data,
149 const char *handler_name,
150 const char *location)
152 struct poll_event_context *poll_ev = talloc_get_type_abort(
153 ev->additional_data, struct poll_event_context);
155 tevent_common_schedule_immediate(im, ev, handler, private_data,
156 handler_name, location);
157 poll_event_wake_pollthread(poll_ev);
161 Private function called by "standard" backend fallback.
162 Note this only allows fallback to "poll" backend, not "poll-mt".
164 _PRIVATE_ bool tevent_poll_event_add_fd_internal(struct tevent_context *ev,
165 struct tevent_fd *fde)
167 struct poll_event_context *poll_ev = talloc_get_type_abort(
168 ev->additional_data, struct poll_event_context);
169 uint64_t fde_idx = UINT64_MAX;
170 size_t num_fdes;
172 fde->additional_flags = UINT64_MAX;
173 tevent_common_fd_mpx_reinit(fde);
174 talloc_set_destructor(fde, poll_event_fd_destructor);
176 if (fde->flags == 0) {
178 * Nothing more to do...
180 return true;
184 * We need to add it to the end of the 'fdes' array.
186 num_fdes = poll_ev->num_fdes + 1;
187 if (num_fdes > talloc_array_length(poll_ev->fdes)) {
188 struct tevent_fd **tmp_fdes = NULL;
189 size_t array_length;
191 array_length = (num_fdes + 15) & ~15; /* round up to 16 */
193 tmp_fdes = talloc_realloc(poll_ev,
194 poll_ev->fdes,
195 struct tevent_fd *,
196 array_length);
197 if (tmp_fdes == NULL) {
198 return false;
200 poll_ev->fdes = tmp_fdes;
203 fde_idx = poll_ev->num_fdes;
204 fde->additional_flags = fde_idx;
205 poll_ev->fdes[fde_idx] = fde;
206 poll_ev->num_fdes++;
208 return true;
212 add a fd based event
213 return NULL on failure (memory allocation error)
215 static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev,
216 TALLOC_CTX *mem_ctx,
217 int fd, uint16_t flags,
218 tevent_fd_handler_t handler,
219 void *private_data,
220 const char *handler_name,
221 const char *location)
223 struct poll_event_context *poll_ev = talloc_get_type_abort(
224 ev->additional_data, struct poll_event_context);
225 struct tevent_fd *fde;
226 bool ok;
228 if (fd < 0) {
229 return NULL;
232 fde = tevent_common_add_fd(ev,
233 mem_ctx,
235 flags,
236 handler,
237 private_data,
238 handler_name,
239 location);
240 if (fde == NULL) {
241 return NULL;
244 ok = tevent_poll_event_add_fd_internal(ev, fde);
245 if (!ok) {
246 TALLOC_FREE(fde);
247 return NULL;
249 poll_event_wake_pollthread(poll_ev);
252 * poll_event_loop_poll will take care of the rest in
253 * poll_event_setup_fresh
255 return fde;
259 map from TEVENT_FD_* to POLLIN/POLLOUT
261 static uint16_t poll_map_flags(uint16_t flags)
263 uint16_t pollflags = 0;
266 * we do not need to specify POLLERR | POLLHUP
267 * they are always reported.
270 if (flags & TEVENT_FD_READ) {
271 pollflags |= POLLIN;
272 #ifdef POLLRDHUP
274 * Note that at least on Linux
275 * POLLRDHUP always returns
276 * POLLIN in addition, so this
277 * is not strictly needed, but
278 * we want to make it explicit.
280 pollflags |= POLLRDHUP;
281 #endif
283 if (flags & TEVENT_FD_WRITE) {
284 pollflags |= POLLOUT;
286 if (flags & TEVENT_FD_ERROR) {
287 #ifdef POLLRDHUP
288 pollflags |= POLLRDHUP;
289 #endif
292 return pollflags;
296 set the fd event flags
298 static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
300 struct tevent_context *ev = fde->event_ctx;
301 struct poll_event_context *poll_ev;
302 uint64_t idx = fde->additional_flags;
304 if (ev == NULL) {
305 return;
308 if (fde->flags == flags) {
309 return;
312 poll_ev = talloc_get_type_abort(
313 ev->additional_data, struct poll_event_context);
315 fde->flags = flags;
317 if (idx == UINT64_MAX) {
319 * We move it between the fresh and disabled lists.
321 tevent_poll_event_add_fd_internal(ev, fde);
322 poll_event_wake_pollthread(poll_ev);
323 return;
326 if (fde->flags == 0) {
328 * We need to remove it from the array
329 * and move it to the disabled list.
331 poll_ev->fdes[idx] = NULL;
332 poll_ev->deleted = true;
333 fde->additional_flags = UINT64_MAX;
334 poll_event_wake_pollthread(poll_ev);
335 return;
338 if (idx >= poll_ev->num_fds) {
340 * Not yet added to the
341 * poll_ev->fds array.
343 poll_event_wake_pollthread(poll_ev);
344 return;
347 poll_ev->fds[idx].events = poll_map_flags(flags);
349 poll_event_wake_pollthread(poll_ev);
352 static bool poll_event_sync_arrays(struct tevent_context *ev,
353 struct poll_event_context *poll_ev)
355 size_t i;
356 size_t array_length;
358 if (poll_ev->deleted) {
360 for (i=0; i < poll_ev->num_fds;) {
361 struct tevent_fd *fde = poll_ev->fdes[i];
362 size_t ci;
364 if (fde != NULL) {
365 i++;
366 continue;
370 * This fde was talloc_free()'ed. Delete it
371 * from the arrays
373 poll_ev->num_fds -= 1;
374 ci = poll_ev->num_fds;
375 if (ci > i) {
376 poll_ev->fds[i] = poll_ev->fds[ci];
377 poll_ev->fdes[i] = poll_ev->fdes[ci];
378 if (poll_ev->fdes[i] != NULL) {
379 poll_ev->fdes[i]->additional_flags = i;
382 poll_ev->fds[ci] = (struct pollfd) { .fd = -1 };
383 poll_ev->fdes[ci] = NULL;
385 poll_ev->deleted = false;
388 if (poll_ev->num_fds == poll_ev->num_fdes) {
389 return true;
393 * Recheck the size of both arrays and make sure
394 * poll_fd->fds array has at least the size of the
395 * in use poll_ev->fdes array.
397 if (poll_ev->num_fdes > talloc_array_length(poll_ev->fds)) {
398 struct pollfd *tmp_fds = NULL;
401 * Make sure both allocated the same length.
403 array_length = talloc_array_length(poll_ev->fdes);
405 tmp_fds = talloc_realloc(poll_ev,
406 poll_ev->fds,
407 struct pollfd,
408 array_length);
409 if (tmp_fds == NULL) {
410 return false;
412 poll_ev->fds = tmp_fds;
416 * Now setup the new elements.
418 for (i = poll_ev->num_fds; i < poll_ev->num_fdes; i++) {
419 struct tevent_fd *fde = poll_ev->fdes[i];
420 struct pollfd *pfd = &poll_ev->fds[poll_ev->num_fds];
422 if (fde == NULL) {
423 continue;
426 if (i > poll_ev->num_fds) {
427 poll_ev->fdes[poll_ev->num_fds] = fde;
428 fde->additional_flags = poll_ev->num_fds;
429 poll_ev->fdes[i] = NULL;
432 pfd->fd = fde->fd;
433 pfd->events = poll_map_flags(fde->flags);
434 pfd->revents = 0;
436 poll_ev->num_fds += 1;
438 /* Both are in sync again */
439 poll_ev->num_fdes = poll_ev->num_fds;
442 * Check if we should shrink the arrays
443 * But keep at least 16 elements.
446 array_length = (poll_ev->num_fds + 15) & ~15; /* round up to 16 */
447 array_length = MAX(array_length, 16);
448 if (array_length < talloc_array_length(poll_ev->fdes)) {
449 struct tevent_fd **tmp_fdes = NULL;
450 struct pollfd *tmp_fds = NULL;
452 tmp_fdes = talloc_realloc(poll_ev,
453 poll_ev->fdes,
454 struct tevent_fd *,
455 array_length);
456 if (tmp_fdes == NULL) {
457 return false;
459 poll_ev->fdes = tmp_fdes;
461 tmp_fds = talloc_realloc(poll_ev,
462 poll_ev->fds,
463 struct pollfd,
464 array_length);
465 if (tmp_fds == NULL) {
466 return false;
468 poll_ev->fds = tmp_fds;
471 return true;
475 event loop handling using poll()
477 static int poll_event_loop_poll(struct tevent_context *ev,
478 struct timeval *tvalp)
480 struct poll_event_context *poll_ev = talloc_get_type_abort(
481 ev->additional_data, struct poll_event_context);
482 int pollrtn;
483 int timeout = -1;
484 int poll_errno;
485 struct tevent_fd *fde = NULL;
486 struct tevent_fd *next = NULL;
487 unsigned i;
488 bool ok;
490 if (ev->signal_events && tevent_common_check_signal(ev)) {
491 return 0;
494 if (tvalp != NULL) {
495 timeout = tvalp->tv_sec * 1000;
496 timeout += (tvalp->tv_usec + 999) / 1000;
499 ok = poll_event_sync_arrays(ev, poll_ev);
500 if (!ok) {
501 return -1;
504 tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
505 pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
506 poll_errno = errno;
507 tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT);
509 if (pollrtn == -1 && poll_errno == EINTR && ev->signal_events) {
510 tevent_common_check_signal(ev);
511 return 0;
514 if (pollrtn == 0 && tvalp) {
515 /* we don't care about a possible delay here */
516 tevent_common_loop_timer_delay(ev);
517 return 0;
520 if (pollrtn <= 0) {
522 * No fd's ready
524 return 0;
527 /* at least one file descriptor is ready - check
528 which ones and call the handler, being careful to allow
529 the handler to remove itself when called */
531 for (fde = ev->fd_events; fde; fde = next) {
532 uint64_t idx = fde->additional_flags;
533 struct pollfd *pfd;
534 uint16_t flags = 0;
536 next = fde->next;
538 if (idx == UINT64_MAX) {
539 continue;
542 pfd = &poll_ev->fds[idx];
544 if (pfd->revents & POLLNVAL) {
546 * the socket is dead! this should never
547 * happen as the socket should have first been
548 * made readable and that should have removed
549 * the event, so this must be a bug.
551 * We ignore it here to match the epoll
552 * behavior.
554 tevent_debug(ev, TEVENT_DEBUG_ERROR,
555 "POLLNVAL on fde[%p] fd[%d] - disabling\n",
556 fde, pfd->fd);
557 poll_ev->fdes[idx] = NULL;
558 poll_ev->deleted = true;
559 tevent_common_fd_disarm(fde);
560 continue;
563 #ifdef POLLRDHUP
564 #define __POLL_RETURN_ERROR_FLAGS (POLLHUP|POLLERR|POLLRDHUP)
565 #else
566 #define __POLL_RETURN_ERROR_FLAGS (POLLHUP|POLLERR)
567 #endif
569 if (pfd->revents & __POLL_RETURN_ERROR_FLAGS) {
571 * If we only wait for TEVENT_FD_WRITE, we
572 * should not tell the event handler about it,
573 * and remove the writable flag, as we only
574 * report errors when waiting for read events
575 * or explicit for errors.
577 if (!(fde->flags & (TEVENT_FD_READ|TEVENT_FD_ERROR)))
579 TEVENT_FD_NOT_WRITEABLE(fde);
580 continue;
582 if (fde->flags & TEVENT_FD_ERROR) {
583 flags |= TEVENT_FD_ERROR;
585 if (fde->flags & TEVENT_FD_READ) {
586 flags |= TEVENT_FD_READ;
589 if (pfd->revents & POLLIN) {
590 flags |= TEVENT_FD_READ;
592 if (pfd->revents & POLLOUT) {
593 flags |= TEVENT_FD_WRITE;
596 * Note that fde->flags could be changed when using
597 * the poll_mt backend together with threads,
598 * that why we need to check pfd->revents and fde->flags
600 flags &= fde->flags;
601 if (flags != 0) {
602 DLIST_DEMOTE(ev->fd_events, fde);
603 return tevent_common_invoke_fd_handler(fde, flags, NULL);
607 for (i = 0; i < poll_ev->num_fds; i++) {
608 if (poll_ev->fds[i].revents & POLLNVAL) {
610 * the socket is dead! this should never
611 * happen as the socket should have first been
612 * made readable and that should have removed
613 * the event, so this must be a bug or
614 * a race in the poll_mt usage.
616 fde = poll_ev->fdes[i];
617 tevent_debug(ev, TEVENT_DEBUG_WARNING,
618 "POLLNVAL on dangling fd[%d] fde[%p] - disabling\n",
619 poll_ev->fds[i].fd, fde);
620 poll_ev->fdes[i] = NULL;
621 poll_ev->deleted = true;
622 if (fde != NULL) {
623 tevent_common_fd_disarm(fde);
628 return 0;
632 do a single event loop using the events defined in ev
634 static int poll_event_loop_once(struct tevent_context *ev,
635 const char *location)
637 struct timeval tval;
639 if (ev->signal_events &&
640 tevent_common_check_signal(ev)) {
641 return 0;
644 if (ev->threaded_contexts != NULL) {
645 tevent_common_threaded_activate_immediate(ev);
648 if (ev->immediate_events &&
649 tevent_common_loop_immediate(ev)) {
650 return 0;
653 tval = tevent_common_loop_timer_delay(ev);
654 if (tevent_timeval_is_zero(&tval)) {
655 return 0;
658 return poll_event_loop_poll(ev, &tval);
661 static const struct tevent_ops poll_event_ops = {
662 .context_init = poll_event_context_init,
663 .add_fd = poll_event_add_fd,
664 .set_fd_close_fn = tevent_common_fd_set_close_fn,
665 .get_fd_flags = tevent_common_fd_get_flags,
666 .set_fd_flags = poll_event_set_fd_flags,
667 .add_timer = tevent_common_add_timer_v2,
668 .schedule_immediate = tevent_common_schedule_immediate,
669 .add_signal = tevent_common_add_signal,
670 .loop_once = poll_event_loop_once,
671 .loop_wait = tevent_common_loop_wait,
674 _PRIVATE_ bool tevent_poll_init(void)
676 return tevent_register_backend("poll", &poll_event_ops);
679 static const struct tevent_ops poll_event_mt_ops = {
680 .context_init = poll_event_context_init_mt,
681 .add_fd = poll_event_add_fd,
682 .set_fd_close_fn = tevent_common_fd_set_close_fn,
683 .get_fd_flags = tevent_common_fd_get_flags,
684 .set_fd_flags = poll_event_set_fd_flags,
685 .add_timer = tevent_common_add_timer_v2,
686 .schedule_immediate = poll_event_schedule_immediate,
687 .add_signal = tevent_common_add_signal,
688 .loop_once = poll_event_loop_once,
689 .loop_wait = tevent_common_loop_wait,
692 _PRIVATE_ bool tevent_poll_mt_init(void)
694 return tevent_register_backend("poll_mt", &poll_event_mt_ops);