2 Unix SMB/CIFS implementation.
3 main select loop and event handling
4 Copyright (C) Andrew Tridgell 2003-2005
5 Copyright (C) Stefan Metzmacher 2005-2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
9 ** under the LGPL
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "system/filesys.h"
27 #include "system/select.h"
29 #include "tevent_util.h"
30 #include "tevent_internal.h"
/*
 * Private backend state for the poll(2) event loop, hung off
 * tevent_context->additional_data.
 */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;

	/*
	 * These two arrays are maintained together: fds[i] is the
	 * struct pollfd passed to poll(2) for the event fdes[i].
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread: the write end of a
	 * pipe whose read end sits in fds[0]. -1 when the context is
	 * not multi-threaded.
	 */
	int signal_fd;

	/* information for exiting from the event loop */
	int exit_code;

	/*
	 * Set when an fde was talloc_free()'ed; the arrays carry a
	 * NULL hole until poll_event_setup_fresh compacts them.
	 */
	bool deleted;
};
59 static int poll_event_context_destructor(struct poll_event_context
*poll_ev
)
61 struct tevent_fd
*fd
, *fn
;
63 for (fd
= poll_ev
->fresh
; fd
; fd
= fn
) {
66 DLIST_REMOVE(poll_ev
->fresh
, fd
);
69 if (poll_ev
->signal_fd
== -1) {
71 * Non-threaded, no signal pipe
76 close(poll_ev
->signal_fd
);
77 poll_ev
->signal_fd
= -1;
79 if (poll_ev
->num_fds
== 0) {
82 if (poll_ev
->fds
[0].fd
!= -1) {
83 close(poll_ev
->fds
[0].fd
);
84 poll_ev
->fds
[0].fd
= -1;
90 create a poll_event_context structure.
92 static int poll_event_context_init(struct tevent_context
*ev
)
94 struct poll_event_context
*poll_ev
;
97 * we might be called during tevent_re_initialise()
98 * which means we need to free our old additional_data
99 * in order to detach old fd events from the
100 * poll_ev->fresh list
102 TALLOC_FREE(ev
->additional_data
);
104 poll_ev
= talloc_zero(ev
, struct poll_event_context
);
105 if (poll_ev
== NULL
) {
109 poll_ev
->signal_fd
= -1;
110 ev
->additional_data
= poll_ev
;
111 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
/*
 * Switch an fd into non-blocking mode while preserving its other
 * file status flags. Returns false if either fcntl(2) call fails.
 */
static bool set_nonblock(int fd)
{
	int val;

	val = fcntl(fd, F_GETFL, 0);
	if (val == -1) {
		return false;
	}
	val |= O_NONBLOCK;

	return (fcntl(fd, F_SETFL, val) != -1);
}
128 static int poll_event_context_init_mt(struct tevent_context
*ev
)
130 struct poll_event_context
*poll_ev
;
135 ret
= poll_event_context_init(ev
);
140 poll_ev
= talloc_get_type_abort(
141 ev
->additional_data
, struct poll_event_context
);
143 poll_ev
->fds
= talloc_zero(poll_ev
, struct pollfd
);
144 if (poll_ev
->fds
== NULL
) {
153 if (!set_nonblock(fds
[0]) || !set_nonblock(fds
[1])) {
159 poll_ev
->signal_fd
= fds
[1];
161 pfd
= &poll_ev
->fds
[0];
163 pfd
->events
= (POLLIN
|POLLHUP
);
165 poll_ev
->num_fds
= 1;
167 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
172 static void poll_event_wake_pollthread(struct poll_event_context
*poll_ev
)
177 if (poll_ev
->signal_fd
== -1) {
182 ret
= write(poll_ev
->signal_fd
, &c
, sizeof(c
));
183 } while ((ret
== -1) && (errno
== EINTR
));
186 static void poll_event_drain_signal_fd(struct poll_event_context
*poll_ev
)
192 if (poll_ev
->signal_fd
== -1) {
196 if (poll_ev
->num_fds
< 1) {
199 fd
= poll_ev
->fds
[0].fd
;
202 ret
= read(fd
, buf
, sizeof(buf
));
203 } while (ret
== sizeof(buf
));
209 static int poll_event_fd_destructor(struct tevent_fd
*fde
)
211 struct tevent_context
*ev
= fde
->event_ctx
;
212 struct poll_event_context
*poll_ev
;
213 uint64_t del_idx
= fde
->additional_flags
;
219 poll_ev
= talloc_get_type_abort(
220 ev
->additional_data
, struct poll_event_context
);
222 if (del_idx
== UINT64_MAX
) {
224 DLIST_REMOVE(poll_ev
->fresh
, fde
);
228 poll_ev
->fdes
[del_idx
] = NULL
;
229 poll_ev
->deleted
= true;
230 poll_event_wake_pollthread(poll_ev
);
232 return tevent_common_fd_destructor(fde
);
235 static void poll_event_schedule_immediate(struct tevent_immediate
*im
,
236 struct tevent_context
*ev
,
237 tevent_immediate_handler_t handler
,
239 const char *handler_name
,
240 const char *location
)
242 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
243 ev
->additional_data
, struct poll_event_context
);
245 tevent_common_schedule_immediate(im
, ev
, handler
, private_data
,
246 handler_name
, location
);
247 poll_event_wake_pollthread(poll_ev
);
251 Private function called by "standard" backend fallback.
252 Note this only allows fallback to "poll" backend, not "poll-mt".
254 _PRIVATE_
void tevent_poll_event_add_fd_internal(struct tevent_context
*ev
,
255 struct tevent_fd
*fde
)
257 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
258 ev
->additional_data
, struct poll_event_context
);
260 fde
->additional_flags
= UINT64_MAX
;
261 fde
->additional_data
= NULL
;
262 DLIST_ADD(poll_ev
->fresh
, fde
);
263 talloc_set_destructor(fde
, poll_event_fd_destructor
);
268 return NULL on failure (memory allocation error)
270 static struct tevent_fd
*poll_event_add_fd(struct tevent_context
*ev
,
272 int fd
, uint16_t flags
,
273 tevent_fd_handler_t handler
,
275 const char *handler_name
,
276 const char *location
)
278 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
279 ev
->additional_data
, struct poll_event_context
);
280 struct tevent_fd
*fde
;
286 fde
= talloc(mem_ctx
? mem_ctx
: ev
, struct tevent_fd
);
293 fde
->handler
= handler
;
294 fde
->close_fn
= NULL
;
295 fde
->private_data
= private_data
;
296 fde
->handler_name
= handler_name
;
297 fde
->location
= location
;
298 fde
->additional_flags
= UINT64_MAX
;
299 fde
->additional_data
= NULL
;
301 tevent_poll_event_add_fd_internal(ev
, fde
);
302 poll_event_wake_pollthread(poll_ev
);
305 * poll_event_loop_poll will take care of the rest in
306 * poll_event_setup_fresh
312 set the fd event flags
314 static void poll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
316 struct tevent_context
*ev
= fde
->event_ctx
;
317 struct poll_event_context
*poll_ev
;
318 uint64_t idx
= fde
->additional_flags
;
324 poll_ev
= talloc_get_type_abort(
325 ev
->additional_data
, struct poll_event_context
);
329 if (idx
== UINT64_MAX
) {
331 * poll_event_setup_fresh not yet called after this fde was
332 * added. We don't have to do anything to transfer the changed
333 * flags to the array passed to poll(2)
340 if (flags
& TEVENT_FD_READ
) {
341 pollflags
|= (POLLIN
|POLLHUP
);
343 if (flags
& TEVENT_FD_WRITE
) {
344 pollflags
|= (POLLOUT
);
346 poll_ev
->fds
[idx
].events
= pollflags
;
348 poll_event_wake_pollthread(poll_ev
);
351 static bool poll_event_setup_fresh(struct tevent_context
*ev
,
352 struct poll_event_context
*poll_ev
)
354 struct tevent_fd
*fde
, *next
;
355 unsigned num_fresh
, num_fds
;
357 if (poll_ev
->deleted
) {
358 unsigned first_fd
= (poll_ev
->signal_fd
!= -1) ? 1 : 0;
361 for (i
=first_fd
; i
< poll_ev
->num_fds
;) {
362 fde
= poll_ev
->fdes
[i
];
369 * This fde was talloc_free()'ed. Delete it
372 poll_ev
->num_fds
-= 1;
373 if (poll_ev
->num_fds
== i
) {
376 poll_ev
->fds
[i
] = poll_ev
->fds
[poll_ev
->num_fds
];
377 poll_ev
->fdes
[i
] = poll_ev
->fdes
[poll_ev
->num_fds
];
378 if (poll_ev
->fdes
[i
] != NULL
) {
379 poll_ev
->fdes
[i
]->additional_flags
= i
;
383 poll_ev
->deleted
= false;
385 if (poll_ev
->fresh
== NULL
) {
390 for (fde
= poll_ev
->fresh
; fde
; fde
= fde
->next
) {
393 num_fds
= poll_ev
->num_fds
+ num_fresh
;
396 * We check the length of fdes here. It is the last one
397 * enlarged, so if the realloc for poll_fd->fdes fails,
398 * poll_fd->fds will have at least the size of poll_fd->fdes
401 if (num_fds
>= talloc_array_length(poll_ev
->fdes
)) {
402 struct pollfd
*tmp_fds
;
403 struct tevent_fd
**tmp_fdes
;
404 unsigned array_length
;
406 array_length
= (num_fds
+ 15) & ~15; /* round up to 16 */
408 tmp_fds
= talloc_realloc(
409 poll_ev
, poll_ev
->fds
, struct pollfd
, array_length
);
410 if (tmp_fds
== NULL
) {
413 poll_ev
->fds
= tmp_fds
;
415 tmp_fdes
= talloc_realloc(
416 poll_ev
, poll_ev
->fdes
, struct tevent_fd
*,
418 if (tmp_fdes
== NULL
) {
421 poll_ev
->fdes
= tmp_fdes
;
424 for (fde
= poll_ev
->fresh
; fde
; fde
= next
) {
427 pfd
= &poll_ev
->fds
[poll_ev
->num_fds
];
433 if (fde
->flags
& TEVENT_FD_READ
) {
434 pfd
->events
|= (POLLIN
|POLLHUP
);
436 if (fde
->flags
& TEVENT_FD_WRITE
) {
437 pfd
->events
|= (POLLOUT
);
440 fde
->additional_flags
= poll_ev
->num_fds
;
441 poll_ev
->fdes
[poll_ev
->num_fds
] = fde
;
444 DLIST_REMOVE(poll_ev
->fresh
, fde
);
445 DLIST_ADD(ev
->fd_events
, fde
);
447 poll_ev
->num_fds
+= 1;
453 event loop handling using poll()
455 static int poll_event_loop_poll(struct tevent_context
*ev
,
456 struct timeval
*tvalp
)
458 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
459 ev
->additional_data
, struct poll_event_context
);
466 if (ev
->signal_events
&& tevent_common_check_signal(ev
)) {
471 timeout
= tvalp
->tv_sec
* 1000;
472 timeout
+= (tvalp
->tv_usec
+ 999) / 1000;
475 poll_event_drain_signal_fd(poll_ev
);
477 if (!poll_event_setup_fresh(ev
, poll_ev
)) {
481 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
482 pollrtn
= poll(poll_ev
->fds
, poll_ev
->num_fds
, timeout
);
484 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
486 if (pollrtn
== -1 && poll_errno
== EINTR
&& ev
->signal_events
) {
487 tevent_common_check_signal(ev
);
491 if (pollrtn
== 0 && tvalp
) {
492 /* we don't care about a possible delay here */
493 tevent_common_loop_timer_delay(ev
);
504 first_fd
= (poll_ev
->signal_fd
!= -1) ? 1 : 0;
506 /* at least one file descriptor is ready - check
507 which ones and call the handler, being careful to allow
508 the handler to remove itself when called */
510 for (i
=first_fd
; i
<poll_ev
->num_fds
; i
= next_i
) {
512 struct tevent_fd
*fde
;
517 fde
= poll_ev
->fdes
[i
];
520 * This fde was talloc_free()'ed. Delete it
523 poll_ev
->num_fds
-= 1;
524 if (poll_ev
->num_fds
== i
) {
527 poll_ev
->fds
[i
] = poll_ev
->fds
[poll_ev
->num_fds
];
528 poll_ev
->fdes
[i
] = poll_ev
->fdes
[poll_ev
->num_fds
];
529 if (poll_ev
->fdes
[i
] != NULL
) {
530 poll_ev
->fdes
[i
]->additional_flags
= i
;
532 /* we have to reprocess position 'i' */
537 pfd
= &poll_ev
->fds
[i
];
539 if (pfd
->revents
& (POLLHUP
|POLLERR
)) {
540 /* If we only wait for TEVENT_FD_WRITE, we
541 should not tell the event handler about it,
542 and remove the writable flag, as we only
543 report errors when waiting for read events
544 to match the select behavior. */
545 if (!(fde
->flags
& TEVENT_FD_READ
)) {
546 TEVENT_FD_NOT_WRITEABLE(fde
);
549 flags
|= TEVENT_FD_READ
;
551 if (pfd
->revents
& POLLIN
) {
552 flags
|= TEVENT_FD_READ
;
554 if (pfd
->revents
& POLLOUT
) {
555 flags
|= TEVENT_FD_WRITE
;
558 fde
->handler(ev
, fde
, flags
, fde
->private_data
);
567 do a single event loop using the events defined in ev
569 static int poll_event_loop_once(struct tevent_context
*ev
,
570 const char *location
)
574 if (ev
->signal_events
&&
575 tevent_common_check_signal(ev
)) {
579 if (ev
->immediate_events
&&
580 tevent_common_loop_immediate(ev
)) {
584 tval
= tevent_common_loop_timer_delay(ev
);
585 if (tevent_timeval_is_zero(&tval
)) {
589 return poll_event_loop_poll(ev
, &tval
);
592 static int poll_event_loop_wait(struct tevent_context
*ev
,
593 const char *location
)
595 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
596 ev
->additional_data
, struct poll_event_context
);
599 * loop as long as we have events pending
601 while (ev
->fd_events
||
603 ev
->immediate_events
||
607 ret
= _tevent_loop_once(ev
, location
);
609 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
610 "_tevent_loop_once() failed: %d - %s\n",
611 ret
, strerror(errno
));
616 tevent_debug(ev
, TEVENT_DEBUG_WARNING
,
617 "poll_event_loop_wait() out of events\n");
621 static const struct tevent_ops poll_event_ops
= {
622 .context_init
= poll_event_context_init
,
623 .add_fd
= poll_event_add_fd
,
624 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
625 .get_fd_flags
= tevent_common_fd_get_flags
,
626 .set_fd_flags
= poll_event_set_fd_flags
,
627 .add_timer
= tevent_common_add_timer
,
628 .schedule_immediate
= tevent_common_schedule_immediate
,
629 .add_signal
= tevent_common_add_signal
,
630 .loop_once
= poll_event_loop_once
,
631 .loop_wait
= poll_event_loop_wait
,
634 _PRIVATE_
bool tevent_poll_init(void)
636 return tevent_register_backend("poll", &poll_event_ops
);
639 static const struct tevent_ops poll_event_mt_ops
= {
640 .context_init
= poll_event_context_init_mt
,
641 .add_fd
= poll_event_add_fd
,
642 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
643 .get_fd_flags
= tevent_common_fd_get_flags
,
644 .set_fd_flags
= poll_event_set_fd_flags
,
645 .add_timer
= tevent_common_add_timer
,
646 .schedule_immediate
= poll_event_schedule_immediate
,
647 .add_signal
= tevent_common_add_signal
,
648 .loop_once
= poll_event_loop_once
,
649 .loop_wait
= poll_event_loop_wait
,
652 _PRIVATE_
bool tevent_poll_mt_init(void)
654 return tevent_register_backend("poll_mt", &poll_event_mt_ops
);