/*
 * tevent: merge poll_fresh_fde_destructor() into poll_event_fd_destructor()
 * lib/tevent/tevent_poll.c
 * blob 792abefd48c97155d849855941cee0040d53aa68
 */
1 /*
2 Unix SMB/CIFS implementation.
3 main select loop and event handling
4 Copyright (C) Andrew Tridgell 2003-2005
5 Copyright (C) Stefan Metzmacher 2005-2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
9 ** under the LGPL
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
25 #include "replace.h"
26 #include "system/filesys.h"
27 #include "system/select.h"
28 #include "tevent.h"
29 #include "tevent_util.h"
30 #include "tevent_internal.h"
/*
 * Per-tevent-context private state for the poll backend, hung off
 * ev->additional_data.
 */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;

	/*
	 * Set when an fde in the arrays was talloc_free()'ed; tells
	 * poll_event_setup_fresh to compact fds/fdes.
	 */
	bool deleted;

	/*
	 * These two arrays are maintained together: fds[i] is the
	 * pollfd for the tevent_fd at fdes[i].
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread (write side of a pipe;
	 * -1 in the non-threaded "poll" backend)
	 */
	int signal_fd;

	/* information for exiting from the event loop */
	int exit_code;
};
59 static int poll_event_context_destructor(struct poll_event_context *poll_ev)
61 struct tevent_fd *fd, *fn;
63 for (fd = poll_ev->fresh; fd; fd = fn) {
64 fn = fd->next;
65 fd->event_ctx = NULL;
66 DLIST_REMOVE(poll_ev->fresh, fd);
69 if (poll_ev->signal_fd == -1) {
71 * Non-threaded, no signal pipe
73 return 0;
76 close(poll_ev->signal_fd);
77 poll_ev->signal_fd = -1;
79 if (poll_ev->num_fds == 0) {
80 return 0;
82 if (poll_ev->fds[0].fd != -1) {
83 close(poll_ev->fds[0].fd);
84 poll_ev->fds[0].fd = -1;
86 return 0;
90 create a poll_event_context structure.
92 static int poll_event_context_init(struct tevent_context *ev)
94 struct poll_event_context *poll_ev;
97 * we might be called during tevent_re_initialise()
98 * which means we need to free our old additional_data
99 * in order to detach old fd events from the
100 * poll_ev->fresh list
102 TALLOC_FREE(ev->additional_data);
104 poll_ev = talloc_zero(ev, struct poll_event_context);
105 if (poll_ev == NULL) {
106 return -1;
108 poll_ev->ev = ev;
109 poll_ev->signal_fd = -1;
110 ev->additional_data = poll_ev;
111 talloc_set_destructor(poll_ev, poll_event_context_destructor);
112 return 0;
/*
 * Set O_NONBLOCK on fd, preserving the other file status flags.
 * Returns false if either fcntl() call fails.
 */
static bool set_nonblock(int fd)
{
	int val;

	val = fcntl(fd, F_GETFL, 0);
	if (val == -1) {
		return false;
	}
	val |= O_NONBLOCK;

	return (fcntl(fd, F_SETFL, val) != -1);
}
128 static int poll_event_context_init_mt(struct tevent_context *ev)
130 struct poll_event_context *poll_ev;
131 struct pollfd *pfd;
132 int fds[2];
133 int ret;
135 ret = poll_event_context_init(ev);
136 if (ret == -1) {
137 return ret;
140 poll_ev = talloc_get_type_abort(
141 ev->additional_data, struct poll_event_context);
143 poll_ev->fds = talloc_zero(poll_ev, struct pollfd);
144 if (poll_ev->fds == NULL) {
145 return -1;
148 ret = pipe(fds);
149 if (ret == -1) {
150 return -1;
153 if (!set_nonblock(fds[0]) || !set_nonblock(fds[1])) {
154 close(fds[0]);
155 close(fds[1]);
156 return -1;
159 poll_ev->signal_fd = fds[1];
161 pfd = &poll_ev->fds[0];
162 pfd->fd = fds[0];
163 pfd->events = (POLLIN|POLLHUP);
165 poll_ev->num_fds = 1;
167 talloc_set_destructor(poll_ev, poll_event_context_destructor);
169 return 0;
172 static void poll_event_wake_pollthread(struct poll_event_context *poll_ev)
174 char c;
175 ssize_t ret;
177 if (poll_ev->signal_fd == -1) {
178 return;
180 c = 0;
181 do {
182 ret = write(poll_ev->signal_fd, &c, sizeof(c));
183 } while ((ret == -1) && (errno == EINTR));
186 static void poll_event_drain_signal_fd(struct poll_event_context *poll_ev)
188 char buf[16];
189 ssize_t ret;
190 int fd;
192 if (poll_ev->signal_fd == -1) {
193 return;
196 if (poll_ev->num_fds < 1) {
197 return;
199 fd = poll_ev->fds[0].fd;
201 do {
202 ret = read(fd, buf, sizeof(buf));
203 } while (ret == sizeof(buf));
207 destroy an fd_event
209 static int poll_event_fd_destructor(struct tevent_fd *fde)
211 struct tevent_context *ev = fde->event_ctx;
212 struct poll_event_context *poll_ev;
213 uint64_t del_idx = fde->additional_flags;
215 if (ev == NULL) {
216 goto done;
219 poll_ev = talloc_get_type_abort(
220 ev->additional_data, struct poll_event_context);
222 if (del_idx == UINT64_MAX) {
224 DLIST_REMOVE(poll_ev->fresh, fde);
225 goto done;
228 poll_ev->fdes[del_idx] = NULL;
229 poll_ev->deleted = true;
230 poll_event_wake_pollthread(poll_ev);
231 done:
232 return tevent_common_fd_destructor(fde);
235 static void poll_event_schedule_immediate(struct tevent_immediate *im,
236 struct tevent_context *ev,
237 tevent_immediate_handler_t handler,
238 void *private_data,
239 const char *handler_name,
240 const char *location)
242 struct poll_event_context *poll_ev = talloc_get_type_abort(
243 ev->additional_data, struct poll_event_context);
245 tevent_common_schedule_immediate(im, ev, handler, private_data,
246 handler_name, location);
247 poll_event_wake_pollthread(poll_ev);
251 Private function called by "standard" backend fallback.
252 Note this only allows fallback to "poll" backend, not "poll-mt".
254 _PRIVATE_ void tevent_poll_event_add_fd_internal(struct tevent_context *ev,
255 struct tevent_fd *fde)
257 struct poll_event_context *poll_ev = talloc_get_type_abort(
258 ev->additional_data, struct poll_event_context);
260 fde->additional_flags = UINT64_MAX;
261 fde->additional_data = NULL;
262 DLIST_ADD(poll_ev->fresh, fde);
263 talloc_set_destructor(fde, poll_event_fd_destructor);
267 add a fd based event
268 return NULL on failure (memory allocation error)
270 static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev,
271 TALLOC_CTX *mem_ctx,
272 int fd, uint16_t flags,
273 tevent_fd_handler_t handler,
274 void *private_data,
275 const char *handler_name,
276 const char *location)
278 struct poll_event_context *poll_ev = talloc_get_type_abort(
279 ev->additional_data, struct poll_event_context);
280 struct tevent_fd *fde;
282 if (fd < 0) {
283 return NULL;
286 fde = talloc(mem_ctx ? mem_ctx : ev, struct tevent_fd);
287 if (fde == NULL) {
288 return NULL;
290 fde->event_ctx = ev;
291 fde->fd = fd;
292 fde->flags = flags;
293 fde->handler = handler;
294 fde->close_fn = NULL;
295 fde->private_data = private_data;
296 fde->handler_name = handler_name;
297 fde->location = location;
298 fde->additional_flags = UINT64_MAX;
299 fde->additional_data = NULL;
301 tevent_poll_event_add_fd_internal(ev, fde);
302 poll_event_wake_pollthread(poll_ev);
305 * poll_event_loop_poll will take care of the rest in
306 * poll_event_setup_fresh
308 return fde;
312 set the fd event flags
314 static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
316 struct tevent_context *ev = fde->event_ctx;
317 struct poll_event_context *poll_ev;
318 uint64_t idx = fde->additional_flags;
319 uint16_t pollflags;
321 if (ev == NULL) {
322 return;
324 poll_ev = talloc_get_type_abort(
325 ev->additional_data, struct poll_event_context);
327 fde->flags = flags;
329 if (idx == UINT64_MAX) {
331 * poll_event_setup_fresh not yet called after this fde was
332 * added. We don't have to do anything to transfer the changed
333 * flags to the array passed to poll(2)
335 return;
338 pollflags = 0;
340 if (flags & TEVENT_FD_READ) {
341 pollflags |= (POLLIN|POLLHUP);
343 if (flags & TEVENT_FD_WRITE) {
344 pollflags |= (POLLOUT);
346 poll_ev->fds[idx].events = pollflags;
348 poll_event_wake_pollthread(poll_ev);
351 static bool poll_event_setup_fresh(struct tevent_context *ev,
352 struct poll_event_context *poll_ev)
354 struct tevent_fd *fde, *next;
355 unsigned num_fresh, num_fds;
357 if (poll_ev->deleted) {
358 unsigned first_fd = (poll_ev->signal_fd != -1) ? 1 : 0;
359 unsigned i;
361 for (i=first_fd; i < poll_ev->num_fds;) {
362 fde = poll_ev->fdes[i];
363 if (fde != NULL) {
364 i++;
365 continue;
369 * This fde was talloc_free()'ed. Delete it
370 * from the arrays
372 poll_ev->num_fds -= 1;
373 if (poll_ev->num_fds == i) {
374 break;
376 poll_ev->fds[i] = poll_ev->fds[poll_ev->num_fds];
377 poll_ev->fdes[i] = poll_ev->fdes[poll_ev->num_fds];
378 if (poll_ev->fdes[i] != NULL) {
379 poll_ev->fdes[i]->additional_flags = i;
383 poll_ev->deleted = false;
385 if (poll_ev->fresh == NULL) {
386 return true;
389 num_fresh = 0;
390 for (fde = poll_ev->fresh; fde; fde = fde->next) {
391 num_fresh += 1;
393 num_fds = poll_ev->num_fds + num_fresh;
396 * We check the length of fdes here. It is the last one
397 * enlarged, so if the realloc for poll_fd->fdes fails,
398 * poll_fd->fds will have at least the size of poll_fd->fdes
401 if (num_fds >= talloc_array_length(poll_ev->fdes)) {
402 struct pollfd *tmp_fds;
403 struct tevent_fd **tmp_fdes;
404 unsigned array_length;
406 array_length = (num_fds + 15) & ~15; /* round up to 16 */
408 tmp_fds = talloc_realloc(
409 poll_ev, poll_ev->fds, struct pollfd, array_length);
410 if (tmp_fds == NULL) {
411 return false;
413 poll_ev->fds = tmp_fds;
415 tmp_fdes = talloc_realloc(
416 poll_ev, poll_ev->fdes, struct tevent_fd *,
417 array_length);
418 if (tmp_fdes == NULL) {
419 return false;
421 poll_ev->fdes = tmp_fdes;
424 for (fde = poll_ev->fresh; fde; fde = next) {
425 struct pollfd *pfd;
427 pfd = &poll_ev->fds[poll_ev->num_fds];
429 pfd->fd = fde->fd;
430 pfd->events = 0;
431 pfd->revents = 0;
433 if (fde->flags & TEVENT_FD_READ) {
434 pfd->events |= (POLLIN|POLLHUP);
436 if (fde->flags & TEVENT_FD_WRITE) {
437 pfd->events |= (POLLOUT);
440 fde->additional_flags = poll_ev->num_fds;
441 poll_ev->fdes[poll_ev->num_fds] = fde;
443 next = fde->next;
444 DLIST_REMOVE(poll_ev->fresh, fde);
445 DLIST_ADD(ev->fd_events, fde);
447 poll_ev->num_fds += 1;
449 return true;
453 event loop handling using poll()
455 static int poll_event_loop_poll(struct tevent_context *ev,
456 struct timeval *tvalp)
458 struct poll_event_context *poll_ev = talloc_get_type_abort(
459 ev->additional_data, struct poll_event_context);
460 int pollrtn;
461 int timeout = -1;
462 unsigned first_fd;
463 unsigned i, next_i;
464 int poll_errno;
466 if (ev->signal_events && tevent_common_check_signal(ev)) {
467 return 0;
470 if (tvalp != NULL) {
471 timeout = tvalp->tv_sec * 1000;
472 timeout += (tvalp->tv_usec + 999) / 1000;
475 poll_event_drain_signal_fd(poll_ev);
477 if (!poll_event_setup_fresh(ev, poll_ev)) {
478 return -1;
481 tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
482 pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
483 poll_errno = errno;
484 tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT);
486 if (pollrtn == -1 && poll_errno == EINTR && ev->signal_events) {
487 tevent_common_check_signal(ev);
488 return 0;
491 if (pollrtn == 0 && tvalp) {
492 /* we don't care about a possible delay here */
493 tevent_common_loop_timer_delay(ev);
494 return 0;
497 if (pollrtn <= 0) {
499 * No fd's ready
501 return 0;
504 first_fd = (poll_ev->signal_fd != -1) ? 1 : 0;
506 /* at least one file descriptor is ready - check
507 which ones and call the handler, being careful to allow
508 the handler to remove itself when called */
510 for (i=first_fd; i<poll_ev->num_fds; i = next_i) {
511 struct pollfd *pfd;
512 struct tevent_fd *fde;
513 uint16_t flags = 0;
515 next_i = i + 1;
517 fde = poll_ev->fdes[i];
518 if (fde == NULL) {
520 * This fde was talloc_free()'ed. Delete it
521 * from the arrays
523 poll_ev->num_fds -= 1;
524 if (poll_ev->num_fds == i) {
525 break;
527 poll_ev->fds[i] = poll_ev->fds[poll_ev->num_fds];
528 poll_ev->fdes[i] = poll_ev->fdes[poll_ev->num_fds];
529 if (poll_ev->fdes[i] != NULL) {
530 poll_ev->fdes[i]->additional_flags = i;
532 /* we have to reprocess position 'i' */
533 next_i = i;
534 continue;
537 pfd = &poll_ev->fds[i];
539 if (pfd->revents & (POLLHUP|POLLERR)) {
540 /* If we only wait for TEVENT_FD_WRITE, we
541 should not tell the event handler about it,
542 and remove the writable flag, as we only
543 report errors when waiting for read events
544 to match the select behavior. */
545 if (!(fde->flags & TEVENT_FD_READ)) {
546 TEVENT_FD_NOT_WRITEABLE(fde);
547 continue;
549 flags |= TEVENT_FD_READ;
551 if (pfd->revents & POLLIN) {
552 flags |= TEVENT_FD_READ;
554 if (pfd->revents & POLLOUT) {
555 flags |= TEVENT_FD_WRITE;
557 if (flags != 0) {
558 fde->handler(ev, fde, flags, fde->private_data);
559 break;
563 return 0;
567 do a single event loop using the events defined in ev
569 static int poll_event_loop_once(struct tevent_context *ev,
570 const char *location)
572 struct timeval tval;
574 if (ev->signal_events &&
575 tevent_common_check_signal(ev)) {
576 return 0;
579 if (ev->immediate_events &&
580 tevent_common_loop_immediate(ev)) {
581 return 0;
584 tval = tevent_common_loop_timer_delay(ev);
585 if (tevent_timeval_is_zero(&tval)) {
586 return 0;
589 return poll_event_loop_poll(ev, &tval);
592 static int poll_event_loop_wait(struct tevent_context *ev,
593 const char *location)
595 struct poll_event_context *poll_ev = talloc_get_type_abort(
596 ev->additional_data, struct poll_event_context);
599 * loop as long as we have events pending
601 while (ev->fd_events ||
602 ev->timer_events ||
603 ev->immediate_events ||
604 ev->signal_events ||
605 poll_ev->fresh) {
606 int ret;
607 ret = _tevent_loop_once(ev, location);
608 if (ret != 0) {
609 tevent_debug(ev, TEVENT_DEBUG_FATAL,
610 "_tevent_loop_once() failed: %d - %s\n",
611 ret, strerror(errno));
612 return ret;
616 tevent_debug(ev, TEVENT_DEBUG_WARNING,
617 "poll_event_loop_wait() out of events\n");
618 return 0;
621 static const struct tevent_ops poll_event_ops = {
622 .context_init = poll_event_context_init,
623 .add_fd = poll_event_add_fd,
624 .set_fd_close_fn = tevent_common_fd_set_close_fn,
625 .get_fd_flags = tevent_common_fd_get_flags,
626 .set_fd_flags = poll_event_set_fd_flags,
627 .add_timer = tevent_common_add_timer,
628 .schedule_immediate = tevent_common_schedule_immediate,
629 .add_signal = tevent_common_add_signal,
630 .loop_once = poll_event_loop_once,
631 .loop_wait = poll_event_loop_wait,
634 _PRIVATE_ bool tevent_poll_init(void)
636 return tevent_register_backend("poll", &poll_event_ops);
639 static const struct tevent_ops poll_event_mt_ops = {
640 .context_init = poll_event_context_init_mt,
641 .add_fd = poll_event_add_fd,
642 .set_fd_close_fn = tevent_common_fd_set_close_fn,
643 .get_fd_flags = tevent_common_fd_get_flags,
644 .set_fd_flags = poll_event_set_fd_flags,
645 .add_timer = tevent_common_add_timer,
646 .schedule_immediate = poll_event_schedule_immediate,
647 .add_signal = tevent_common_add_signal,
648 .loop_once = poll_event_loop_once,
649 .loop_wait = poll_event_loop_wait,
652 _PRIVATE_ bool tevent_poll_mt_init(void)
654 return tevent_register_backend("poll_mt", &poll_event_mt_ops);