/*
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell 2003-2005
   Copyright (C) Stefan Metzmacher 2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_util.h"
#include "tevent_internal.h"
/* Per-tevent-context private state for the poll() backend. */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * A DLIST for fresh fde's added by poll_event_add_fd but not
	 * picked up yet by poll_event_loop_once
	 */
	struct tevent_fd *fresh;

	/*
	 * These two arrays are maintained together: fds[i] is the
	 * pollfd handed to poll(2), fdes[i] the tevent_fd it belongs
	 * to.  fdes[i] == NULL marks a deleted entry.
	 */
	struct pollfd *fds;
	struct tevent_fd **fdes;
	unsigned num_fds;

	/*
	 * Signal fd to wake the poll() thread; -1 in the
	 * single-threaded ("poll") backend.
	 */
	int signal_fd;

	/* information for exiting from the event loop */
	int exit_code;
};
58 static int poll_event_context_destructor(struct poll_event_context
*poll_ev
)
60 struct tevent_fd
*fd
, *fn
;
62 for (fd
= poll_ev
->fresh
; fd
; fd
= fn
) {
65 DLIST_REMOVE(poll_ev
->fresh
, fd
);
68 if (poll_ev
->signal_fd
== -1) {
70 * Non-threaded, no signal pipe
75 close(poll_ev
->signal_fd
);
76 poll_ev
->signal_fd
= -1;
78 if (poll_ev
->num_fds
== 0) {
81 if (poll_ev
->fds
[0].fd
!= -1) {
82 close(poll_ev
->fds
[0].fd
);
83 poll_ev
->fds
[0].fd
= -1;
89 create a poll_event_context structure.
91 static int poll_event_context_init(struct tevent_context
*ev
)
93 struct poll_event_context
*poll_ev
;
95 poll_ev
= talloc_zero(ev
, struct poll_event_context
);
96 if (poll_ev
== NULL
) {
100 poll_ev
->signal_fd
= -1;
101 ev
->additional_data
= poll_ev
;
102 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
/*
 * Put a file descriptor into non-blocking mode, preserving its other
 * file status flags.  Returns true on success, false if either fcntl
 * call fails.
 */
static bool set_nonblock(int fd)
{
	int val;

	val = fcntl(fd, F_GETFL, 0);
	if (val == -1) {
		return false;
	}
	val |= O_NONBLOCK;

	return (fcntl(fd, F_SETFL, val) != -1);
}
119 static int poll_event_context_init_mt(struct tevent_context
*ev
)
121 struct poll_event_context
*poll_ev
;
126 ret
= poll_event_context_init(ev
);
131 poll_ev
= talloc_get_type_abort(
132 ev
->additional_data
, struct poll_event_context
);
134 poll_ev
->fds
= talloc_zero(poll_ev
, struct pollfd
);
135 if (poll_ev
->fds
== NULL
) {
144 if (!set_nonblock(fds
[0]) || !set_nonblock(fds
[1])) {
150 poll_ev
->signal_fd
= fds
[1];
152 pfd
= &poll_ev
->fds
[0];
154 pfd
->events
= (POLLIN
|POLLHUP
);
156 poll_ev
->num_fds
= 1;
158 talloc_set_destructor(poll_ev
, poll_event_context_destructor
);
163 static void poll_event_wake_pollthread(struct poll_event_context
*poll_ev
)
168 if (poll_ev
->signal_fd
== -1) {
173 ret
= write(poll_ev
->signal_fd
, &c
, sizeof(c
));
174 } while ((ret
== -1) && (errno
== EINTR
));
177 static void poll_event_drain_signal_fd(struct poll_event_context
*poll_ev
)
183 if (poll_ev
->signal_fd
== -1) {
187 if (poll_ev
->num_fds
< 1) {
190 fd
= poll_ev
->fds
[0].fd
;
193 ret
= read(fd
, buf
, sizeof(buf
));
194 } while (ret
== sizeof(buf
));
200 static int poll_event_fd_destructor(struct tevent_fd
*fde
)
202 struct tevent_context
*ev
= fde
->event_ctx
;
203 struct poll_event_context
*poll_ev
;
204 uint64_t del_idx
= fde
->additional_flags
;
210 poll_ev
= talloc_get_type_abort(
211 ev
->additional_data
, struct poll_event_context
);
213 poll_ev
->fdes
[del_idx
] = NULL
;
214 poll_event_wake_pollthread(poll_ev
);
216 return tevent_common_fd_destructor(fde
);
219 static int poll_fresh_fde_destructor(struct tevent_fd
*fde
)
221 struct tevent_context
*ev
= fde
->event_ctx
;
222 struct poll_event_context
*poll_ev
;
227 poll_ev
= talloc_get_type_abort(
228 ev
->additional_data
, struct poll_event_context
);
230 DLIST_REMOVE(poll_ev
->fresh
, fde
);
234 static void poll_event_schedule_immediate(struct tevent_immediate
*im
,
235 struct tevent_context
*ev
,
236 tevent_immediate_handler_t handler
,
238 const char *handler_name
,
239 const char *location
)
241 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
242 ev
->additional_data
, struct poll_event_context
);
244 tevent_common_schedule_immediate(im
, ev
, handler
, private_data
,
245 handler_name
, location
);
246 poll_event_wake_pollthread(poll_ev
);
251 return NULL on failure (memory allocation error)
253 static struct tevent_fd
*poll_event_add_fd(struct tevent_context
*ev
,
255 int fd
, uint16_t flags
,
256 tevent_fd_handler_t handler
,
258 const char *handler_name
,
259 const char *location
)
261 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
262 ev
->additional_data
, struct poll_event_context
);
263 struct tevent_fd
*fde
;
269 fde
= talloc(mem_ctx
? mem_ctx
: ev
, struct tevent_fd
);
276 fde
->handler
= handler
;
277 fde
->close_fn
= NULL
;
278 fde
->private_data
= private_data
;
279 fde
->handler_name
= handler_name
;
280 fde
->location
= location
;
281 fde
->additional_flags
= UINT64_MAX
;
282 fde
->additional_data
= NULL
;
284 DLIST_ADD(poll_ev
->fresh
, fde
);
285 talloc_set_destructor(fde
, poll_fresh_fde_destructor
);
286 poll_event_wake_pollthread(poll_ev
);
289 * poll_event_loop_poll will take care of the rest in
290 * poll_event_setup_fresh
296 set the fd event flags
298 static void poll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
300 struct tevent_context
*ev
= fde
->event_ctx
;
301 struct poll_event_context
*poll_ev
;
302 uint64_t idx
= fde
->additional_flags
;
308 poll_ev
= talloc_get_type_abort(
309 ev
->additional_data
, struct poll_event_context
);
313 if (idx
== UINT64_MAX
) {
315 * poll_event_setup_fresh not yet called after this fde was
316 * added. We don't have to do anything to transfer the changed
317 * flags to the array passed to poll(2)
324 if (flags
& TEVENT_FD_READ
) {
325 pollflags
|= (POLLIN
|POLLHUP
);
327 if (flags
& TEVENT_FD_WRITE
) {
328 pollflags
|= (POLLOUT
);
330 poll_ev
->fds
[idx
].events
= pollflags
;
332 poll_event_wake_pollthread(poll_ev
);
335 static bool poll_event_setup_fresh(struct tevent_context
*ev
,
336 struct poll_event_context
*poll_ev
)
338 struct tevent_fd
*fde
, *next
;
339 unsigned num_fresh
, num_fds
;
341 if (poll_ev
->fresh
== NULL
) {
346 for (fde
= poll_ev
->fresh
; fde
; fde
= fde
->next
) {
349 num_fds
= poll_ev
->num_fds
+ num_fresh
;
352 * We check the length of fdes here. It is the last one
353 * enlarged, so if the realloc for poll_fd->fdes fails,
354 * poll_fd->fds will have at least the size of poll_fd->fdes
357 if (num_fds
>= talloc_array_length(poll_ev
->fdes
)) {
358 struct pollfd
*tmp_fds
;
359 struct tevent_fd
**tmp_fdes
;
360 unsigned array_length
;
362 array_length
= (num_fds
+ 15) & ~15; /* round up to 16 */
364 tmp_fds
= talloc_realloc(
365 poll_ev
, poll_ev
->fds
, struct pollfd
, array_length
);
366 if (tmp_fds
== NULL
) {
369 poll_ev
->fds
= tmp_fds
;
371 tmp_fdes
= talloc_realloc(
372 poll_ev
, poll_ev
->fdes
, struct tevent_fd
*,
374 if (tmp_fdes
== NULL
) {
377 poll_ev
->fdes
= tmp_fdes
;
380 for (fde
= poll_ev
->fresh
; fde
; fde
= next
) {
383 pfd
= &poll_ev
->fds
[poll_ev
->num_fds
];
389 if (fde
->flags
& TEVENT_FD_READ
) {
390 pfd
->events
|= (POLLIN
|POLLHUP
);
392 if (fde
->flags
& TEVENT_FD_WRITE
) {
393 pfd
->events
|= (POLLOUT
);
396 fde
->additional_flags
= poll_ev
->num_fds
;
397 poll_ev
->fdes
[poll_ev
->num_fds
] = fde
;
400 DLIST_REMOVE(poll_ev
->fresh
, fde
);
401 DLIST_ADD(ev
->fd_events
, fde
);
403 talloc_set_destructor(fde
, poll_event_fd_destructor
);
405 poll_ev
->num_fds
+= 1;
411 event loop handling using poll()
413 static int poll_event_loop_poll(struct tevent_context
*ev
,
414 struct timeval
*tvalp
)
416 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
417 ev
->additional_data
, struct poll_event_context
);
423 if (ev
->signal_events
&& tevent_common_check_signal(ev
)) {
428 timeout
= tvalp
->tv_sec
* 1000;
429 timeout
+= (tvalp
->tv_usec
+ 999) / 1000;
432 poll_event_drain_signal_fd(poll_ev
);
434 if (!poll_event_setup_fresh(ev
, poll_ev
)) {
438 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
439 pollrtn
= poll(poll_ev
->fds
, poll_ev
->num_fds
, timeout
);
440 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
442 if (pollrtn
== -1 && errno
== EINTR
&& ev
->signal_events
) {
443 tevent_common_check_signal(ev
);
447 if (pollrtn
== 0 && tvalp
) {
448 /* we don't care about a possible delay here */
449 tevent_common_loop_timer_delay(ev
);
460 first_fd
= (poll_ev
->signal_fd
!= -1) ? 1 : 0;
462 /* at least one file descriptor is ready - check
463 which ones and call the handler, being careful to allow
464 the handler to remove itself when called */
466 for (i
=first_fd
; i
<poll_ev
->num_fds
; i
++) {
468 struct tevent_fd
*fde
;
471 fde
= poll_ev
->fdes
[i
];
474 * This fde was talloc_free()'ed. Delete it
477 poll_ev
->num_fds
-= 1;
478 poll_ev
->fds
[i
] = poll_ev
->fds
[poll_ev
->num_fds
];
479 poll_ev
->fdes
[i
] = poll_ev
->fdes
[poll_ev
->num_fds
];
480 if (poll_ev
->fdes
[i
] != NULL
) {
481 poll_ev
->fdes
[i
]->additional_flags
= i
;
486 pfd
= &poll_ev
->fds
[i
];
488 if (pfd
->revents
& (POLLHUP
|POLLERR
)) {
489 /* If we only wait for TEVENT_FD_WRITE, we
490 should not tell the event handler about it,
491 and remove the writable flag, as we only
492 report errors when waiting for read events
493 to match the select behavior. */
494 if (!(fde
->flags
& TEVENT_FD_READ
)) {
495 TEVENT_FD_NOT_WRITEABLE(fde
);
498 flags
|= TEVENT_FD_READ
;
500 if (pfd
->revents
& POLLIN
) {
501 flags
|= TEVENT_FD_READ
;
503 if (pfd
->revents
& POLLOUT
) {
504 flags
|= TEVENT_FD_WRITE
;
507 fde
->handler(ev
, fde
, flags
, fde
->private_data
);
516 do a single event loop using the events defined in ev
518 static int poll_event_loop_once(struct tevent_context
*ev
,
519 const char *location
)
523 if (ev
->signal_events
&&
524 tevent_common_check_signal(ev
)) {
528 if (ev
->immediate_events
&&
529 tevent_common_loop_immediate(ev
)) {
533 tval
= tevent_common_loop_timer_delay(ev
);
534 if (tevent_timeval_is_zero(&tval
)) {
538 return poll_event_loop_poll(ev
, &tval
);
541 static const struct tevent_ops poll_event_ops
= {
542 .context_init
= poll_event_context_init
,
543 .add_fd
= poll_event_add_fd
,
544 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
545 .get_fd_flags
= tevent_common_fd_get_flags
,
546 .set_fd_flags
= poll_event_set_fd_flags
,
547 .add_timer
= tevent_common_add_timer
,
548 .schedule_immediate
= tevent_common_schedule_immediate
,
549 .add_signal
= tevent_common_add_signal
,
550 .loop_once
= poll_event_loop_once
,
551 .loop_wait
= tevent_common_loop_wait
,
554 _PRIVATE_
bool tevent_poll_init(void)
556 return tevent_register_backend("poll", &poll_event_ops
);
559 static const struct tevent_ops poll_event_mt_ops
= {
560 .context_init
= poll_event_context_init_mt
,
561 .add_fd
= poll_event_add_fd
,
562 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
563 .get_fd_flags
= tevent_common_fd_get_flags
,
564 .set_fd_flags
= poll_event_set_fd_flags
,
565 .add_timer
= tevent_common_add_timer
,
566 .schedule_immediate
= poll_event_schedule_immediate
,
567 .add_signal
= tevent_common_add_signal
,
568 .loop_once
= poll_event_loop_once
,
569 .loop_wait
= tevent_common_loop_wait
,
572 _PRIVATE_
bool tevent_poll_mt_init(void)
574 return tevent_register_backend("poll_mt", &poll_event_mt_ops
);