/*
   Unix SMB/CIFS implementation.
   Copyright (C) Andrew Tridgell 1992-1998
   Copyright (C) Volker Lendecke 2005-2007

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
22 #include "lib/tevent/tevent_internal.h"
23 #include "../lib/util/select.h"
24 #include "system/select.h"
/*
 * Per-tevent-context private state for the s3 "poll" backend, hung off
 * ev->additional_data.  Reconstructed from mangled text; fields match
 * the uses in event_add_to_poll_args()/run_events_poll() below.
 */
struct tevent_poll_private {
	/*
	 * Index from file descriptor into the pollfd array
	 */
	int *pollfd_idx;

	/*
	 * Cache for s3_event_loop_once to avoid reallocs
	 */
	struct pollfd *pfds;
};
38 static struct tevent_poll_private
*tevent_get_poll_private(
39 struct tevent_context
*ev
)
41 struct tevent_poll_private
*state
;
43 state
= (struct tevent_poll_private
*)ev
->additional_data
;
45 state
= talloc_zero(ev
, struct tevent_poll_private
);
46 ev
->additional_data
= (void *)state
;
48 DEBUG(10, ("talloc failed\n"));
54 static void count_fds(struct tevent_context
*ev
,
55 int *pnum_fds
, int *pmax_fd
)
57 struct tevent_fd
*fde
;
61 for (fde
= ev
->fd_events
; fde
!= NULL
; fde
= fde
->next
) {
62 if (fde
->flags
& (TEVENT_FD_READ
|TEVENT_FD_WRITE
)) {
64 if (fde
->fd
> max_fd
) {
73 bool event_add_to_poll_args(struct tevent_context
*ev
, TALLOC_CTX
*mem_ctx
,
74 struct pollfd
**pfds
, int *pnum_pfds
,
77 struct tevent_poll_private
*state
;
78 struct tevent_fd
*fde
;
79 int i
, num_fds
, max_fd
, num_pollfds
, idx_len
;
81 struct timeval now
, diff
;
84 state
= tevent_get_poll_private(ev
);
88 count_fds(ev
, &num_fds
, &max_fd
);
92 if (talloc_array_length(state
->pollfd_idx
) < idx_len
) {
93 state
->pollfd_idx
= talloc_realloc(
94 state
, state
->pollfd_idx
, int, idx_len
);
95 if (state
->pollfd_idx
== NULL
) {
96 DEBUG(10, ("talloc_realloc failed\n"));
102 num_pollfds
= *pnum_pfds
;
104 if (talloc_array_length(fds
) < num_pollfds
+ num_fds
) {
105 fds
= talloc_realloc(mem_ctx
, fds
, struct pollfd
,
106 num_pollfds
+ num_fds
);
108 DEBUG(10, ("talloc_realloc failed\n"));
113 memset(&fds
[num_pollfds
], 0, sizeof(struct pollfd
) * num_fds
);
116 * This needs tuning. We need to cope with multiple fde's for a file
117 * descriptor. The problem is that we need to re-use pollfd_idx across
118 * calls for efficiency. One way would be a direct bitmask that might
119 * be initialized quicker, but our bitmap_init implementation is
120 * pretty heavy-weight as well.
122 for (i
=0; i
<idx_len
; i
++) {
123 state
->pollfd_idx
[i
] = -1;
126 for (fde
= ev
->fd_events
; fde
; fde
= fde
->next
) {
129 if ((fde
->flags
& (TEVENT_FD_READ
|TEVENT_FD_WRITE
)) == 0) {
133 if (state
->pollfd_idx
[fde
->fd
] == -1) {
135 * We haven't seen this fd yet. Allocate a new pollfd.
137 state
->pollfd_idx
[fde
->fd
] = num_pollfds
;
138 pfd
= &fds
[num_pollfds
];
142 * We have already seen this fd. OR in the flags.
144 pfd
= &fds
[state
->pollfd_idx
[fde
->fd
]];
149 if (fde
->flags
& TEVENT_FD_READ
) {
150 pfd
->events
|= (POLLIN
|POLLHUP
);
152 if (fde
->flags
& TEVENT_FD_WRITE
) {
153 pfd
->events
|= POLLOUT
;
157 *pnum_pfds
= num_pollfds
;
159 if (ev
->immediate_events
!= NULL
) {
163 if (ev
->timer_events
== NULL
) {
164 *ptimeout
= MIN(*ptimeout
, INT_MAX
);
168 now
= timeval_current();
169 diff
= timeval_until(&now
, &ev
->timer_events
->next_event
);
170 timeout
= timeval_to_msec(diff
);
172 if (timeout
< *ptimeout
) {
179 bool run_events_poll(struct tevent_context
*ev
, int pollrtn
,
180 struct pollfd
*pfds
, int num_pfds
)
182 struct tevent_poll_private
*state
;
184 struct tevent_fd
*fde
;
186 if (ev
->signal_events
&&
187 tevent_common_check_signal(ev
)) {
191 if (ev
->threaded_contexts
!= NULL
) {
192 tevent_common_threaded_activate_immediate(ev
);
195 if (ev
->immediate_events
&&
196 tevent_common_loop_immediate(ev
)) {
203 tval
= tevent_common_loop_timer_delay(ev
);
204 if (tevent_timeval_is_zero(&tval
)) {
214 state
= (struct tevent_poll_private
*)ev
->additional_data
;
215 pollfd_idx
= state
->pollfd_idx
;
217 for (fde
= ev
->fd_events
; fde
; fde
= fde
->next
) {
221 if ((fde
->flags
& (TEVENT_FD_READ
|TEVENT_FD_WRITE
)) == 0) {
225 if (pollfd_idx
[fde
->fd
] >= num_pfds
) {
226 DEBUG(1, ("internal error: pollfd_idx[fde->fd] (%d) "
227 ">= num_pfds (%d)\n", pollfd_idx
[fde
->fd
],
231 pfd
= &pfds
[pollfd_idx
[fde
->fd
]];
233 if (pfd
->fd
!= fde
->fd
) {
234 DEBUG(1, ("internal error: pfd->fd (%d) "
235 "!= fde->fd (%d)\n", pollfd_idx
[fde
->fd
],
240 if (pfd
->revents
& (POLLHUP
|POLLERR
)) {
241 /* If we only wait for TEVENT_FD_WRITE, we
242 should not tell the event handler about it,
243 and remove the writable flag, as we only
244 report errors when waiting for read events
245 to match the select behavior. */
246 if (!(fde
->flags
& TEVENT_FD_READ
)) {
247 TEVENT_FD_NOT_WRITEABLE(fde
);
250 flags
|= TEVENT_FD_READ
;
253 if (pfd
->revents
& POLLIN
) {
254 flags
|= TEVENT_FD_READ
;
256 if (pfd
->revents
& POLLOUT
) {
257 flags
|= TEVENT_FD_WRITE
;
259 if (flags
& fde
->flags
) {
260 DLIST_DEMOTE(ev
->fd_events
, fde
);
261 fde
->handler(ev
, fde
, flags
, fde
->private_data
);
269 struct timeval
*get_timed_events_timeout(struct tevent_context
*ev
,
270 struct timeval
*to_ret
)
274 if ((ev
->timer_events
== NULL
) && (ev
->immediate_events
== NULL
)) {
277 if (ev
->immediate_events
!= NULL
) {
278 *to_ret
= timeval_zero();
282 now
= timeval_current();
283 *to_ret
= timeval_until(&now
, &ev
->timer_events
->next_event
);
285 DEBUG(10, ("timed_events_timeout: %d/%d\n", (int)to_ret
->tv_sec
,
286 (int)to_ret
->tv_usec
));
291 static int s3_event_loop_once(struct tevent_context
*ev
, const char *location
)
293 struct tevent_poll_private
*state
;
301 state
= tevent_get_poll_private(ev
);
307 if (run_events_poll(ev
, 0, NULL
, 0)) {
312 if (!event_add_to_poll_args(ev
, state
,
313 &state
->pfds
, &num_pfds
, &timeout
)) {
317 tevent_trace_point_callback(ev
, TEVENT_TRACE_BEFORE_WAIT
);
318 ret
= poll(state
->pfds
, num_pfds
, timeout
);
320 tevent_trace_point_callback(ev
, TEVENT_TRACE_AFTER_WAIT
);
323 if (ret
== -1 && errno
!= EINTR
) {
324 tevent_debug(ev
, TEVENT_DEBUG_FATAL
,
325 "poll() failed: %d:%s\n",
326 errno
, strerror(errno
));
330 run_events_poll(ev
, ret
, state
->pfds
, num_pfds
);
/*
 * Backend context_init hook: the s3 poll backend allocates its private
 * state lazily (tevent_get_poll_private), so nothing to do here.
 */
static int s3_event_context_init(struct tevent_context *ev)
{
	return 0;
}
339 void dump_event_list(struct tevent_context
*ev
)
341 struct tevent_timer
*te
;
342 struct tevent_fd
*fe
;
343 struct timeval evt
, now
;
349 now
= timeval_current();
351 DEBUG(10,("dump_event_list:\n"));
353 for (te
= ev
->timer_events
; te
; te
= te
->next
) {
355 evt
= timeval_until(&now
, &te
->next_event
);
357 DEBUGADD(10,("Timed Event \"%s\" %p handled in %d seconds (at %s)\n",
361 http_timestring(talloc_tos(), te
->next_event
.tv_sec
)));
364 for (fe
= ev
->fd_events
; fe
; fe
= fe
->next
) {
366 DEBUGADD(10,("FD Event %d %p, flags: 0x%04x\n",
373 static const struct tevent_ops s3_event_ops
= {
374 .context_init
= s3_event_context_init
,
375 .add_fd
= tevent_common_add_fd
,
376 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
377 .get_fd_flags
= tevent_common_fd_get_flags
,
378 .set_fd_flags
= tevent_common_fd_set_flags
,
379 .add_timer
= tevent_common_add_timer
,
380 .schedule_immediate
= tevent_common_schedule_immediate
,
381 .add_signal
= tevent_common_add_signal
,
382 .loop_once
= s3_event_loop_once
,
383 .loop_wait
= tevent_common_loop_wait
,
386 static bool s3_tevent_init(void)
388 static bool initialized
;
392 initialized
= tevent_register_backend("s3", &s3_event_ops
);
393 tevent_set_default_backend("s3");
397 struct tevent_context
*s3_tevent_context_init(TALLOC_CTX
*mem_ctx
)
399 struct tevent_context
*ev
;
403 ev
= tevent_context_init_byname(mem_ctx
, "s3");
405 samba_tevent_set_debug(ev
, "s3_tevent");
412 struct tevent_timer
*te
;
413 struct timeval interval
;
415 bool (*handler
)(const struct timeval
*now
, void *private_data
);
419 static void smbd_idle_event_handler(struct tevent_context
*ctx
,
420 struct tevent_timer
*te
,
424 struct idle_event
*event
=
425 talloc_get_type_abort(private_data
, struct idle_event
);
427 TALLOC_FREE(event
->te
);
429 DEBUG(10,("smbd_idle_event_handler: %s %p called\n",
430 event
->name
, event
->te
));
432 if (!event
->handler(&now
, event
->private_data
)) {
433 DEBUG(10,("smbd_idle_event_handler: %s %p stopped\n",
434 event
->name
, event
->te
));
435 /* Don't repeat, delete ourselves */
440 DEBUG(10,("smbd_idle_event_handler: %s %p rescheduled\n",
441 event
->name
, event
->te
));
443 event
->te
= tevent_add_timer(ctx
, event
,
444 timeval_sum(&now
, &event
->interval
),
445 smbd_idle_event_handler
, event
);
447 /* We can't do much but fail here. */
448 SMB_ASSERT(event
->te
!= NULL
);
451 struct idle_event
*event_add_idle(struct tevent_context
*event_ctx
,
453 struct timeval interval
,
455 bool (*handler
)(const struct timeval
*now
,
459 struct idle_event
*result
;
460 struct timeval now
= timeval_current();
462 result
= talloc(mem_ctx
, struct idle_event
);
463 if (result
== NULL
) {
464 DEBUG(0, ("talloc failed\n"));
468 result
->interval
= interval
;
469 result
->handler
= handler
;
470 result
->private_data
= private_data
;
472 if (!(result
->name
= talloc_asprintf(result
, "idle_evt(%s)", name
))) {
473 DEBUG(0, ("talloc failed\n"));
478 result
->te
= tevent_add_timer(event_ctx
, result
,
479 timeval_sum(&now
, &interval
),
480 smbd_idle_event_handler
, result
);
481 if (result
->te
== NULL
) {
482 DEBUG(0, ("event_add_timed failed\n"));
487 DEBUG(10,("event_add_idle: %s %p\n", result
->name
, result
->te
));