2 Unix SMB/CIFS implementation.
3 main select loop and event handling
4 Copyright (C) Andrew Tridgell 2003-2005
5 Copyright (C) Stefan Metzmacher 2005-2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "system/filesys.h"
27 #include "system/select.h"
29 #include "tevent_util.h"
30 #include "tevent_internal.h"
/*
 * Backend-private state attached to tevent_context->additional_data for the
 * poll()-based backend.
 *
 * NOTE(review): the embedded original line numbers below jump (34 -> 37,
 * 37 -> 42, 50 -> 54, 54 -> 58), so several members the functions further
 * down rely on ('fds'/'num_fds', 'num_fdes', 'deleted', 'use_mt_mode') were
 * lost in extraction — recover them from upstream before building.
 */
32 struct poll_event_context
{
33 /* a pointer back to the generic event_context */
34 struct tevent_context
*ev
;
37 * one or more events were deleted or disabled
42 * These two arrays are maintained together.
44 * The following is always true:
47 * new 'fresh' elements are added at the end
48 * of the 'fdes' array and picked up later
49 * to the 'fds' array in poll_event_sync_arrays()
50 * before the poll() syscall.
54 struct tevent_fd
**fdes
;
58 * use tevent_common_wakeup(ev) to wake the poll() thread
/*
 * Create and attach a zeroed poll_event_context to 'ev'.
 * Frees any previous additional_data first (needed when called via
 * tevent_re_initialise()), then stores the new context in
 * ev->additional_data.
 *
 * NOTE(review): line numbers jump 79 -> 83, so the body of the
 * 'poll_ev == NULL' branch (presumably an error return) and the final
 * 'return 0;' plus closing braces are missing from this extraction.
 */
64 create a poll_event_context structure.
66 static int poll_event_context_init(struct tevent_context
*ev
)
68 struct poll_event_context
*poll_ev
;
71 * we might be called during tevent_re_initialise()
72 * which means we need to free our old additional_data
73 * in order to detach old fd events from the
76 TALLOC_FREE(ev
->additional_data
);
78 poll_ev
= talloc_zero(ev
, struct poll_event_context
);
79 if (poll_ev
== NULL
) {
83 ev
->additional_data
= poll_ev
;
/*
 * Multi-threaded variant of the context init: performs the plain
 * poll_event_context_init(), initialises the wakeup mechanism via
 * tevent_common_wakeup_init(), and flags the context with use_mt_mode.
 *
 * NOTE(review): the error-checking branches after both 'ret' assignments
 * and the trailing 'return ret;'/closing brace are missing here
 * (line numbers jump 92 -> 97 and 100 -> 105).
 */
87 static int poll_event_context_init_mt(struct tevent_context
*ev
)
89 struct poll_event_context
*poll_ev
;
92 ret
= poll_event_context_init(ev
);
97 poll_ev
= talloc_get_type_abort(
98 ev
->additional_data
, struct poll_event_context
);
100 ret
= tevent_common_wakeup_init(ev
);
105 poll_ev
->use_mt_mode
= true;
/*
 * Wake the thread sitting in poll() — a no-op unless the context was
 * created in multi-threaded (poll_mt) mode.
 *
 * NOTE(review): the early 'return;' inside the !use_mt_mode branch and the
 * closing braces are missing from this extraction (112 -> 115 jump).
 */
110 static void poll_event_wake_pollthread(struct poll_event_context
*poll_ev
)
112 if (!poll_ev
->use_mt_mode
) {
115 tevent_common_wakeup(poll_ev
->ev
);
/*
 * talloc destructor for a tevent_fd registered with the poll backend.
 * fde->additional_flags holds the fde's slot index in poll_ev->fdes
 * (UINT64_MAX means "not in the array"). A tracked fde is removed from
 * the array, the context is marked 'deleted' for lazy compaction in
 * poll_event_sync_arrays(), and the poll thread is woken; finally the
 * common destructor runs.
 *
 * NOTE(review): lines between 125 -> 131 (likely a NULL ev guard) and
 * 134 -> 138 (the UINT64_MAX early-exit body) are missing here.
 */
121 static int poll_event_fd_destructor(struct tevent_fd
*fde
)
123 struct tevent_context
*ev
= fde
->event_ctx
;
124 struct poll_event_context
*poll_ev
;
125 uint64_t del_idx
= fde
->additional_flags
;
131 poll_ev
= talloc_get_type_abort(
132 ev
->additional_data
, struct poll_event_context
);
134 if (del_idx
== UINT64_MAX
) {
138 poll_ev
->fdes
[del_idx
] = NULL
;
139 poll_ev
->deleted
= true;
140 poll_event_wake_pollthread(poll_ev
);
142 return tevent_common_fd_destructor(fde
);
/*
 * Schedule an immediate event via the common implementation, then wake
 * the poll thread so a blocked poll() notices the new work. Used as the
 * poll_mt backend's .schedule_immediate hook.
 *
 * NOTE(review): the 'private_data' parameter (used at original line 155)
 * is referenced but its declaration line (original 148) is missing from
 * this extraction, as are the function braces.
 */
145 static void poll_event_schedule_immediate(struct tevent_immediate
*im
,
146 struct tevent_context
*ev
,
147 tevent_immediate_handler_t handler
,
149 const char *handler_name
,
150 const char *location
)
152 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
153 ev
->additional_data
, struct poll_event_context
);
155 tevent_common_schedule_immediate(im
, ev
, handler
, private_data
,
156 handler_name
, location
);
157 poll_event_wake_pollthread(poll_ev
);
/*
 * Register an fde with the poll backend: mark it "fresh"
 * (additional_flags = UINT64_MAX), install the destructor, and — when
 * flags != 0 — append it to the 'fdes' array, growing the talloc array in
 * 16-element steps. poll_event_sync_arrays() later mirrors fresh entries
 * into the pollfd array.
 *
 * NOTE(review): multiple lines are missing per the number gaps
 * (175 -> 177, 177 -> 183, 192 talloc_realloc arguments, 196 -> 199,
 * final 'return true;'), as are the declarations of num_fdes and
 * array_length — this body is not compilable as extracted.
 */
161 Private function called by "standard" backend fallback.
162 Note this only allows fallback to "poll" backend, not "poll-mt".
164 _PRIVATE_
bool tevent_poll_event_add_fd_internal(struct tevent_context
*ev
,
165 struct tevent_fd
*fde
)
167 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
168 ev
->additional_data
, struct poll_event_context
);
169 uint64_t fde_idx
= UINT64_MAX
;
172 fde
->additional_flags
= UINT64_MAX
;
173 talloc_set_destructor(fde
, poll_event_fd_destructor
);
175 if (fde
->flags
== 0) {
177 * Nothing more to do...
183 * We need to add it to the end of the 'fdes' array.
185 num_fdes
= poll_ev
->num_fdes
+ 1;
186 if (num_fdes
> talloc_array_length(poll_ev
->fdes
)) {
187 struct tevent_fd
**tmp_fdes
= NULL
;
190 array_length
= (num_fdes
+ 15) & ~15; /* round up to 16 */
192 tmp_fdes
= talloc_realloc(poll_ev
,
196 if (tmp_fdes
== NULL
) {
199 poll_ev
->fdes
= tmp_fdes
;
202 fde_idx
= poll_ev
->num_fdes
;
203 fde
->additional_flags
= fde_idx
;
204 poll_ev
->fdes
[fde_idx
] = fde
;
/*
 * Backend .add_fd hook: create the fde via tevent_common_add_fd(), hand
 * it to tevent_poll_event_add_fd_internal() for array bookkeeping, and
 * wake the poll thread; poll_event_setup_fresh / poll_event_loop_poll
 * finish the integration. Returns NULL on allocation failure.
 *
 * NOTE(review): the argument list of tevent_common_add_fd (original
 * lines 232-238), the NULL/ok error checks, and the final 'return fde;'
 * are missing from this extraction.
 */
212 return NULL on failure (memory allocation error)
214 static struct tevent_fd
*poll_event_add_fd(struct tevent_context
*ev
,
216 int fd
, uint16_t flags
,
217 tevent_fd_handler_t handler
,
219 const char *handler_name
,
220 const char *location
)
222 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
223 ev
->additional_data
, struct poll_event_context
);
224 struct tevent_fd
*fde
;
231 fde
= tevent_common_add_fd(ev
,
243 ok
= tevent_poll_event_add_fd_internal(ev
, fde
);
248 poll_event_wake_pollthread(poll_ev
);
251 * poll_event_loop_poll will take care of the rest in
252 * poll_event_setup_fresh
/*
 * Backend .set_fd_flags hook. Cases handled:
 *  - flags unchanged: early exit (body of that branch missing here);
 *  - fde currently disabled (idx == UINT64_MAX): re-add as "fresh";
 *  - new flags == 0: remove from the fdes slot, mark context deleted,
 *    and park the fde as disabled;
 *  - fde still only in the fresh fdes tail (idx >= num_fds): nothing to
 *    patch in the pollfd array yet, just wake the poll thread;
 *  - otherwise: translate TEVENT_FD_READ/WRITE into POLLIN|POLLHUP /
 *    POLLOUT and rewrite fds[idx].events.
 * Each mutating path wakes the poll thread so poll_mt picks it up.
 *
 * NOTE(review): the 'pollflags' declaration (original ~line 311), the
 * 'return;' statements ending each early branch, and all closing braces
 * are missing from this extraction.
 */
258 set the fd event flags
260 static void poll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
262 struct tevent_context
*ev
= fde
->event_ctx
;
263 struct poll_event_context
*poll_ev
;
264 uint64_t idx
= fde
->additional_flags
;
271 if (fde
->flags
== flags
) {
275 poll_ev
= talloc_get_type_abort(
276 ev
->additional_data
, struct poll_event_context
);
280 if (idx
== UINT64_MAX
) {
282 * We move it between the fresh and disabled lists.
284 tevent_poll_event_add_fd_internal(ev
, fde
);
285 poll_event_wake_pollthread(poll_ev
);
289 if (fde
->flags
== 0) {
291 * We need to remove it from the array
292 * and move it to the disabled list.
294 poll_ev
->fdes
[idx
] = NULL
;
295 poll_ev
->deleted
= true;
296 fde
->additional_flags
= UINT64_MAX
;
297 poll_event_wake_pollthread(poll_ev
);
301 if (idx
>= poll_ev
->num_fds
) {
303 * Not yet added to the
304 * poll_ev->fds array.
306 poll_event_wake_pollthread(poll_ev
);
312 if (flags
& TEVENT_FD_READ
) {
313 pollflags
|= (POLLIN
|POLLHUP
);
315 if (flags
& TEVENT_FD_WRITE
) {
316 pollflags
|= (POLLOUT
);
318 poll_ev
->fds
[idx
].events
= pollflags
;
320 poll_event_wake_pollthread(poll_ev
);
/*
 * Bring the parallel 'fds' (struct pollfd) and 'fdes' (tevent_fd*) arrays
 * back in sync before calling poll():
 *  1. If entries were deleted, compact both arrays by swapping the last
 *     live slot into each hole and fixing up additional_flags;
 *  2. If fresh fdes were appended (num_fdes > num_fds), grow 'fds' to
 *     match and materialise a pollfd (POLLIN|POLLHUP / POLLOUT from
 *     fde->flags) for each new entry;
 *  3. Shrink both arrays (16-element granularity, minimum 16) when far
 *     larger than needed.
 * Presumably returns true on success, false on allocation failure —
 * the return statements themselves are absent from this extraction.
 *
 * NOTE(review): this block is heavily truncated: the declarations of
 * i/ci/array_length, the fd/pfd setup between original lines 391-397,
 * both talloc_realloc argument lists, the 'return true;' paths and many
 * closing braces are all missing (see the line-number gaps). Do not
 * attempt to build from this text; restore from upstream.
 */
323 static bool poll_event_sync_arrays(struct tevent_context
*ev
,
324 struct poll_event_context
*poll_ev
)
329 if (poll_ev
->deleted
) {
331 for (i
=0; i
< poll_ev
->num_fds
;) {
332 struct tevent_fd
*fde
= poll_ev
->fdes
[i
];
341 * This fde was talloc_free()'ed. Delete it
344 poll_ev
->num_fds
-= 1;
345 ci
= poll_ev
->num_fds
;
347 poll_ev
->fds
[i
] = poll_ev
->fds
[ci
];
348 poll_ev
->fdes
[i
] = poll_ev
->fdes
[ci
];
349 if (poll_ev
->fdes
[i
] != NULL
) {
350 poll_ev
->fdes
[i
]->additional_flags
= i
;
353 poll_ev
->fds
[ci
] = (struct pollfd
) { .fd
= -1 };
354 poll_ev
->fdes
[ci
] = NULL
;
356 poll_ev
->deleted
= false;
359 if (poll_ev
->num_fds
== poll_ev
->num_fdes
) {
364 * Recheck the size of both arrays and make sure
365 * poll_fd->fds array has at least the size of the
366 * in use poll_ev->fdes array.
368 if (poll_ev
->num_fdes
> talloc_array_length(poll_ev
->fds
)) {
369 struct pollfd
*tmp_fds
= NULL
;
372 * Make sure both allocated the same length.
374 array_length
= talloc_array_length(poll_ev
->fdes
);
376 tmp_fds
= talloc_realloc(poll_ev
,
380 if (tmp_fds
== NULL
) {
383 poll_ev
->fds
= tmp_fds
;
387 * Now setup the new elements.
389 for (i
= poll_ev
->num_fds
; i
< poll_ev
->num_fdes
; i
++) {
390 struct tevent_fd
*fde
= poll_ev
->fdes
[i
];
391 struct pollfd
*pfd
= &poll_ev
->fds
[poll_ev
->num_fds
];
397 if (i
> poll_ev
->num_fds
) {
398 poll_ev
->fdes
[poll_ev
->num_fds
] = fde
;
399 fde
->additional_flags
= poll_ev
->num_fds
;
400 poll_ev
->fdes
[i
] = NULL
;
407 if (fde
->flags
& TEVENT_FD_READ
) {
408 pfd
->events
|= (POLLIN
|POLLHUP
);
410 if (fde
->flags
& TEVENT_FD_WRITE
) {
411 pfd
->events
|= (POLLOUT
);
414 poll_ev
->num_fds
+= 1;
416 /* Both are in sync again */
417 poll_ev
->num_fdes
= poll_ev
->num_fds
;
420 * Check if we should shrink the arrays
421 * But keep at least 16 elements.
424 array_length
= (poll_ev
->num_fds
+ 15) & ~15; /* round up to 16 */
425 array_length
= MAX(array_length
, 16);
426 if (array_length
< talloc_array_length(poll_ev
->fdes
)) {
427 struct tevent_fd
**tmp_fdes
= NULL
;
428 struct pollfd
*tmp_fds
= NULL
;
430 tmp_fdes
= talloc_realloc(poll_ev
,
434 if (tmp_fdes
== NULL
) {
437 poll_ev
->fdes
= tmp_fdes
;
439 tmp_fds
= talloc_realloc(poll_ev
,
443 if (tmp_fds
== NULL
) {
446 poll_ev
->fds
= tmp_fds
;
/*
 * One pass of the poll()-based event loop:
 *  - handle pending signal events first;
 *  - convert the timeval into a millisecond timeout (rounding usec up);
 *  - sync the fds/fdes arrays, then poll() between trace callbacks;
 *  - EINTR with pending signal events re-runs signal handling;
 *  - timeout (pollrtn == 0) dispatches expired timers;
 *  - otherwise walk ev->fd_events, map each fde's slot index to its
 *    pollfd, translate revents into TEVENT_FD_READ/WRITE (POLLHUP/POLLERR
 *    count as READ only when the caller asked for READ, matching select()
 *    semantics), demote the fde for fairness, and invoke its handler;
 *  - finally scan for POLLNVAL slots with no owning fde (a poll_mt race),
 *    log and disable them.
 *
 * NOTE(review): heavily truncated — missing (per line-number gaps):
 * declarations of pollrtn/timeout/poll_errno/ok/flags/pfd/i, the 'next ='
 * save inside the fde loop, tevent_debug format arguments, several error
 * returns, and the final 'return 0;'. Not buildable as extracted.
 */
453 event loop handling using poll()
455 static int poll_event_loop_poll(struct tevent_context
*ev
,
456 struct timeval
*tvalp
)
458 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
459 ev
->additional_data
, struct poll_event_context
);
463 struct tevent_fd
*fde
= NULL
;
464 struct tevent_fd
*next
= NULL
;
468 if (ev
->signal_events
&& tevent_common_check_signal(ev
)) {
473 timeout
= tvalp
->tv_sec
* 1000;
474 timeout
+= (tvalp
->tv_usec
+ 999) / 1000;
477 ok
= poll_event_sync_arrays(ev
, poll_ev
);
482 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
483 pollrtn
= poll(poll_ev
->fds
, poll_ev
->num_fds
, timeout
);
485 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
487 if (pollrtn
== -1 && poll_errno
== EINTR
&& ev
->signal_events
) {
488 tevent_common_check_signal(ev
);
492 if (pollrtn
== 0 && tvalp
) {
493 /* we don't care about a possible delay here */
494 tevent_common_loop_timer_delay(ev
);
505 /* at least one file descriptor is ready - check
506 which ones and call the handler, being careful to allow
507 the handler to remove itself when called */
509 for (fde
= ev
->fd_events
; fde
; fde
= next
) {
510 uint64_t idx
= fde
->additional_flags
;
516 if (idx
== UINT64_MAX
) {
520 pfd
= &poll_ev
->fds
[idx
];
522 if (pfd
->revents
& POLLNVAL
) {
524 * the socket is dead! this should never
525 * happen as the socket should have first been
526 * made readable and that should have removed
527 * the event, so this must be a bug.
529 * We ignore it here to match the epoll
532 tevent_debug(ev
, TEVENT_DEBUG_ERROR
,
533 "POLLNVAL on fde[%p] fd[%d] - disabling\n",
535 poll_ev
->fdes
[idx
] = NULL
;
536 poll_ev
->deleted
= true;
537 DLIST_REMOVE(ev
->fd_events
, fde
);
539 fde
->event_ctx
= NULL
;
543 if (pfd
->revents
& (POLLHUP
|POLLERR
)) {
544 /* If we only wait for TEVENT_FD_WRITE, we
545 should not tell the event handler about it,
546 and remove the writable flag, as we only
547 report errors when waiting for read events
548 to match the select behavior. */
549 if (!(fde
->flags
& TEVENT_FD_READ
)) {
550 TEVENT_FD_NOT_WRITEABLE(fde
);
553 flags
|= TEVENT_FD_READ
;
555 if (pfd
->revents
& POLLIN
) {
556 flags
|= TEVENT_FD_READ
;
558 if (pfd
->revents
& POLLOUT
) {
559 flags
|= TEVENT_FD_WRITE
;
562 * Note that fde->flags could be changed when using
563 * the poll_mt backend together with threads,
564 * that why we need to check pfd->revents and fde->flags
568 DLIST_DEMOTE(ev
->fd_events
, fde
);
569 return tevent_common_invoke_fd_handler(fde
, flags
, NULL
);
573 for (i
= 0; i
< poll_ev
->num_fds
; i
++) {
574 if (poll_ev
->fds
[i
].revents
& POLLNVAL
) {
576 * the socket is dead! this should never
577 * happen as the socket should have first been
578 * made readable and that should have removed
579 * the event, so this must be a bug or
580 * a race in the poll_mt usage.
582 fde
= poll_ev
->fdes
[i
];
583 tevent_debug(ev
, TEVENT_DEBUG_WARNING
,
584 "POLLNVAL on dangling fd[%d] fde[%p] - disabling\n",
585 poll_ev
->fds
[i
].fd
, fde
);
586 poll_ev
->fdes
[i
] = NULL
;
587 poll_ev
->deleted
= true;
589 DLIST_REMOVE(ev
->fd_events
, fde
);
591 fde
->event_ctx
= NULL
;
/*
 * Backend .loop_once hook: drain pending signal events, activate
 * immediates queued from other threads, run one immediate if pending,
 * compute the next timer delay and hand off to poll_event_loop_poll()
 * with that timeout (zero delay means a timer is already due).
 *
 * NOTE(review): the 'struct timeval tval;' declaration, the 'return 0;'
 * bodies of the early-exit branches, and closing braces are missing
 * from this extraction (gaps 603 -> 607, 608 -> 612, 617 -> 621,
 * 622 -> 626).
 */
600 do a single event loop using the events defined in ev
602 static int poll_event_loop_once(struct tevent_context
*ev
,
603 const char *location
)
607 if (ev
->signal_events
&&
608 tevent_common_check_signal(ev
)) {
612 if (ev
->threaded_contexts
!= NULL
) {
613 tevent_common_threaded_activate_immediate(ev
);
616 if (ev
->immediate_events
&&
617 tevent_common_loop_immediate(ev
)) {
621 tval
= tevent_common_loop_timer_delay(ev
);
622 if (tevent_timeval_is_zero(&tval
)) {
626 return poll_event_loop_poll(ev
, &tval
);
/*
 * Operations table for the single-threaded "poll" backend. Timer, signal,
 * immediate and loop_wait handling delegate to the tevent common layer;
 * fd handling and loop_once use the poll-specific implementations above.
 *
 * NOTE(review): the closing '};' of this initializer is missing from
 * this extraction.
 */
629 static const struct tevent_ops poll_event_ops
= {
630 .context_init
= poll_event_context_init
,
631 .add_fd
= poll_event_add_fd
,
632 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
633 .get_fd_flags
= tevent_common_fd_get_flags
,
634 .set_fd_flags
= poll_event_set_fd_flags
,
635 .add_timer
= tevent_common_add_timer_v2
,
636 .schedule_immediate
= tevent_common_schedule_immediate
,
637 .add_signal
= tevent_common_add_signal
,
638 .loop_once
= poll_event_loop_once
,
639 .loop_wait
= tevent_common_loop_wait
,
/*
 * Register the single-threaded "poll" backend with the tevent core.
 * NOTE(review): the function's opening/closing braces are missing from
 * this extraction.
 */
642 _PRIVATE_
bool tevent_poll_init(void)
644 return tevent_register_backend("poll", &poll_event_ops
);
/*
 * Operations table for the thread-aware "poll_mt" backend. Differs from
 * poll_event_ops only in .context_init (enables wakeup/use_mt_mode) and
 * .schedule_immediate (wakes the poll thread after queueing).
 *
 * NOTE(review): the closing '};' of this initializer is missing from
 * this extraction.
 */
647 static const struct tevent_ops poll_event_mt_ops
= {
648 .context_init
= poll_event_context_init_mt
,
649 .add_fd
= poll_event_add_fd
,
650 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
651 .get_fd_flags
= tevent_common_fd_get_flags
,
652 .set_fd_flags
= poll_event_set_fd_flags
,
653 .add_timer
= tevent_common_add_timer_v2
,
654 .schedule_immediate
= poll_event_schedule_immediate
,
655 .add_signal
= tevent_common_add_signal
,
656 .loop_once
= poll_event_loop_once
,
657 .loop_wait
= tevent_common_loop_wait
,
/*
 * Register the thread-aware "poll_mt" backend with the tevent core.
 * NOTE(review): the function's opening/closing braces are missing from
 * this extraction.
 */
660 _PRIVATE_
bool tevent_poll_mt_init(void)
662 return tevent_register_backend("poll_mt", &poll_event_mt_ops
);