2 Unix SMB/CIFS implementation.
3 main select loop and event handling
4 Copyright (C) Andrew Tridgell 2003-2005
5 Copyright (C) Stefan Metzmacher 2005-2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "system/filesys.h"
27 #include "system/select.h"
29 #include "tevent_util.h"
30 #include "tevent_internal.h"
/*
 * Private per-context state for the poll backend, hung off
 * tevent_context->additional_data.
 */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;

	/*
	 * These two arrays are maintained together: fds[i] is the
	 * pollfd handed to poll(2) and fdes[i] is the tevent_fd it
	 * belongs to (NULL once that fde has been talloc_free'd).
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread: the write end of a
	 * nonblocking pipe whose read end is polled in fds[0].
	 * -1 for single-threaded contexts.
	 */
	int signal_fd;

	/* information for exiting from the event loop */
	int exit_code;
};
58 static int poll_event_mt_destructor(struct poll_event_context
*poll_ev
)
60 if (poll_ev
->signal_fd
!= -1) {
61 close(poll_ev
->signal_fd
);
62 poll_ev
->signal_fd
= -1;
64 if (poll_ev
->num_fds
== 0) {
67 if (poll_ev
->fds
[0].fd
!= -1) {
68 close(poll_ev
->fds
[0].fd
);
69 poll_ev
->fds
[0].fd
= -1;
75 create a poll_event_context structure.
77 static int poll_event_context_init(struct tevent_context
*ev
)
79 struct poll_event_context
*poll_ev
;
81 poll_ev
= talloc_zero(ev
, struct poll_event_context
);
82 if (poll_ev
== NULL
) {
86 poll_ev
->signal_fd
= -1;
87 ev
->additional_data
= poll_ev
;
/*
 * Set O_NONBLOCK on a file descriptor while preserving its other file
 * status flags. Returns false if either fcntl(2) call fails.
 */
static bool set_nonblock(int fd)
{
	int val;

	val = fcntl(fd, F_GETFL, 0);
	if (val == -1) {
		return false;
	}
	val |= O_NONBLOCK;

	return (fcntl(fd, F_SETFL, val) != -1);
}
104 static int poll_event_context_init_mt(struct tevent_context
*ev
)
106 struct poll_event_context
*poll_ev
;
111 ret
= poll_event_context_init(ev
);
116 poll_ev
= talloc_get_type_abort(
117 ev
->additional_data
, struct poll_event_context
);
119 poll_ev
->fds
= talloc_zero(poll_ev
, struct pollfd
);
120 if (poll_ev
->fds
== NULL
) {
129 if (!set_nonblock(fds
[0]) || !set_nonblock(fds
[1])) {
135 poll_ev
->signal_fd
= fds
[1];
137 pfd
= &poll_ev
->fds
[0];
139 pfd
->events
= (POLLIN
|POLLHUP
);
141 poll_ev
->num_fds
= 1;
143 talloc_set_destructor(poll_ev
, poll_event_mt_destructor
);
148 static void poll_event_wake_pollthread(struct poll_event_context
*poll_ev
)
153 if (poll_ev
->signal_fd
== -1) {
158 ret
= write(poll_ev
->signal_fd
, &c
, sizeof(c
));
159 } while ((ret
== -1) && (errno
== EINTR
));
162 static void poll_event_drain_signal_fd(struct poll_event_context
*poll_ev
)
168 if (poll_ev
->signal_fd
== -1) {
172 if (poll_ev
->num_fds
< 1) {
175 fd
= poll_ev
->fds
[0].fd
;
178 ret
= read(fd
, buf
, sizeof(buf
));
179 } while (ret
== sizeof(buf
));
185 static int poll_event_fd_destructor(struct tevent_fd
*fde
)
187 struct tevent_context
*ev
= fde
->event_ctx
;
188 struct poll_event_context
*poll_ev
;
189 uint64_t del_idx
= fde
->additional_flags
;
195 poll_ev
= talloc_get_type_abort(
196 ev
->additional_data
, struct poll_event_context
);
198 poll_ev
->fdes
[del_idx
] = NULL
;
199 poll_event_wake_pollthread(poll_ev
);
201 return tevent_common_fd_destructor(fde
);
204 static int poll_fresh_fde_destructor(struct tevent_fd
*fde
)
206 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
207 fde
->event_ctx
->additional_data
, struct poll_event_context
);
208 DLIST_REMOVE(poll_ev
->fresh
, fde
);
212 static void poll_event_schedule_immediate(struct tevent_immediate
*im
,
213 struct tevent_context
*ev
,
214 tevent_immediate_handler_t handler
,
216 const char *handler_name
,
217 const char *location
)
219 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
220 ev
->additional_data
, struct poll_event_context
);
222 tevent_common_schedule_immediate(im
, ev
, handler
, private_data
,
223 handler_name
, location
);
224 poll_event_wake_pollthread(poll_ev
);
229 return NULL on failure (memory allocation error)
231 static struct tevent_fd
*poll_event_add_fd(struct tevent_context
*ev
,
233 int fd
, uint16_t flags
,
234 tevent_fd_handler_t handler
,
236 const char *handler_name
,
237 const char *location
)
239 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
240 ev
->additional_data
, struct poll_event_context
);
241 struct tevent_fd
*fde
;
247 fde
= talloc(mem_ctx
? mem_ctx
: ev
, struct tevent_fd
);
254 fde
->handler
= handler
;
255 fde
->close_fn
= NULL
;
256 fde
->private_data
= private_data
;
257 fde
->handler_name
= handler_name
;
258 fde
->location
= location
;
259 fde
->additional_flags
= UINT64_MAX
;
260 fde
->additional_data
= NULL
;
262 DLIST_ADD(poll_ev
->fresh
, fde
);
263 talloc_set_destructor(fde
, poll_fresh_fde_destructor
);
264 poll_event_wake_pollthread(poll_ev
);
267 * poll_event_loop_poll will take care of the rest in
268 * poll_event_setup_fresh
274 set the fd event flags
276 static void poll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
278 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
279 fde
->event_ctx
->additional_data
, struct poll_event_context
);
280 uint64_t idx
= fde
->additional_flags
;
285 if (idx
== UINT64_MAX
) {
287 * poll_event_setup_fresh not yet called after this fde was
288 * added. We don't have to do anything to transfer the changed
289 * flags to the array passed to poll(2)
296 if (flags
& TEVENT_FD_READ
) {
297 pollflags
|= (POLLIN
|POLLHUP
);
299 if (flags
& TEVENT_FD_WRITE
) {
300 pollflags
|= (POLLOUT
);
302 poll_ev
->fds
[idx
].events
= pollflags
;
304 poll_event_wake_pollthread(poll_ev
);
307 static bool poll_event_setup_fresh(struct tevent_context
*ev
,
308 struct poll_event_context
*poll_ev
)
310 struct tevent_fd
*fde
, *next
;
311 unsigned num_fresh
, num_fds
;
313 if (poll_ev
->fresh
== NULL
) {
318 for (fde
= poll_ev
->fresh
; fde
; fde
= fde
->next
) {
321 num_fds
= poll_ev
->num_fds
+ num_fresh
;
324 * We check the length of fdes here. It is the last one
325 * enlarged, so if the realloc for poll_fd->fdes fails,
326 * poll_fd->fds will have at least the size of poll_fd->fdes
329 if (num_fds
>= talloc_array_length(poll_ev
->fdes
)) {
330 struct pollfd
*tmp_fds
;
331 struct tevent_fd
**tmp_fdes
;
332 unsigned array_length
;
334 array_length
= (num_fds
+ 15) & ~15; /* round up to 16 */
336 tmp_fds
= talloc_realloc(
337 poll_ev
, poll_ev
->fds
, struct pollfd
, array_length
);
338 if (tmp_fds
== NULL
) {
341 poll_ev
->fds
= tmp_fds
;
343 tmp_fdes
= talloc_realloc(
344 poll_ev
, poll_ev
->fdes
, struct tevent_fd
*,
346 if (tmp_fdes
== NULL
) {
349 poll_ev
->fdes
= tmp_fdes
;
352 for (fde
= poll_ev
->fresh
; fde
; fde
= next
) {
355 pfd
= &poll_ev
->fds
[poll_ev
->num_fds
];
361 if (fde
->flags
& TEVENT_FD_READ
) {
362 pfd
->events
|= (POLLIN
|POLLHUP
);
364 if (fde
->flags
& TEVENT_FD_WRITE
) {
365 pfd
->events
|= (POLLOUT
);
368 fde
->additional_flags
= poll_ev
->num_fds
;
369 poll_ev
->fdes
[poll_ev
->num_fds
] = fde
;
372 DLIST_REMOVE(poll_ev
->fresh
, fde
);
373 DLIST_ADD(ev
->fd_events
, fde
);
375 talloc_set_destructor(fde
, poll_event_fd_destructor
);
377 poll_ev
->num_fds
+= 1;
383 event loop handling using poll()
385 static int poll_event_loop_poll(struct tevent_context
*ev
,
386 struct timeval
*tvalp
)
388 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
389 ev
->additional_data
, struct poll_event_context
);
395 if (ev
->signal_events
&& tevent_common_check_signal(ev
)) {
400 timeout
= tvalp
->tv_sec
* 1000;
401 timeout
+= (tvalp
->tv_usec
+ 999) / 1000;
404 poll_event_drain_signal_fd(poll_ev
);
406 if (!poll_event_setup_fresh(ev
, poll_ev
)) {
410 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
411 pollrtn
= poll(poll_ev
->fds
, poll_ev
->num_fds
, timeout
);
412 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
414 if (pollrtn
== -1 && errno
== EINTR
&& ev
->signal_events
) {
415 tevent_common_check_signal(ev
);
419 if (pollrtn
== 0 && tvalp
) {
420 /* we don't care about a possible delay here */
421 tevent_common_loop_timer_delay(ev
);
432 first_fd
= (poll_ev
->signal_fd
!= -1) ? 1 : 0;
434 /* at least one file descriptor is ready - check
435 which ones and call the handler, being careful to allow
436 the handler to remove itself when called */
438 for (i
=first_fd
; i
<poll_ev
->num_fds
; i
++) {
440 struct tevent_fd
*fde
;
443 fde
= poll_ev
->fdes
[i
];
446 * This fde was talloc_free()'ed. Delete it
449 poll_ev
->num_fds
-= 1;
450 poll_ev
->fds
[i
] = poll_ev
->fds
[poll_ev
->num_fds
];
451 poll_ev
->fdes
[i
] = poll_ev
->fdes
[poll_ev
->num_fds
];
452 if (poll_ev
->fdes
[i
] != NULL
) {
453 poll_ev
->fdes
[i
]->additional_flags
= i
;
458 pfd
= &poll_ev
->fds
[i
];
460 if (pfd
->revents
& (POLLHUP
|POLLERR
)) {
461 /* If we only wait for TEVENT_FD_WRITE, we
462 should not tell the event handler about it,
463 and remove the writable flag, as we only
464 report errors when waiting for read events
465 to match the select behavior. */
466 if (!(fde
->flags
& TEVENT_FD_READ
)) {
467 TEVENT_FD_NOT_WRITEABLE(fde
);
470 flags
|= TEVENT_FD_READ
;
472 if (pfd
->revents
& POLLIN
) {
473 flags
|= TEVENT_FD_READ
;
475 if (pfd
->revents
& POLLOUT
) {
476 flags
|= TEVENT_FD_WRITE
;
479 fde
->handler(ev
, fde
, flags
, fde
->private_data
);
488 do a single event loop using the events defined in ev
490 static int poll_event_loop_once(struct tevent_context
*ev
,
491 const char *location
)
495 if (ev
->signal_events
&&
496 tevent_common_check_signal(ev
)) {
500 if (ev
->immediate_events
&&
501 tevent_common_loop_immediate(ev
)) {
505 tval
= tevent_common_loop_timer_delay(ev
);
506 if (tevent_timeval_is_zero(&tval
)) {
510 return poll_event_loop_poll(ev
, &tval
);
513 static const struct tevent_ops poll_event_ops
= {
514 .context_init
= poll_event_context_init
,
515 .add_fd
= poll_event_add_fd
,
516 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
517 .get_fd_flags
= tevent_common_fd_get_flags
,
518 .set_fd_flags
= poll_event_set_fd_flags
,
519 .add_timer
= tevent_common_add_timer
,
520 .schedule_immediate
= tevent_common_schedule_immediate
,
521 .add_signal
= tevent_common_add_signal
,
522 .loop_once
= poll_event_loop_once
,
523 .loop_wait
= tevent_common_loop_wait
,
526 _PRIVATE_
bool tevent_poll_init(void)
528 return tevent_register_backend("poll", &poll_event_ops
);
531 static const struct tevent_ops poll_event_mt_ops
= {
532 .context_init
= poll_event_context_init_mt
,
533 .add_fd
= poll_event_add_fd
,
534 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
535 .get_fd_flags
= tevent_common_fd_get_flags
,
536 .set_fd_flags
= poll_event_set_fd_flags
,
537 .add_timer
= tevent_common_add_timer
,
538 .schedule_immediate
= poll_event_schedule_immediate
,
539 .add_signal
= tevent_common_add_signal
,
540 .loop_once
= poll_event_loop_once
,
541 .loop_wait
= tevent_common_loop_wait
,
544 _PRIVATE_
bool tevent_poll_mt_init(void)
546 return tevent_register_backend("poll_mt", &poll_event_mt_ops
);