/*
   Unix SMB/CIFS implementation.

   Copyright (C) Andrew Tridgell 1992-1998
   Copyright (C) Volker Lendecke 2005

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "lib/tevent/tevent_internal.h"
#include "../lib/util/select.h"
#include "system/select.h"
struct tevent_poll_private {
	/*
	 * Index from file descriptor into the pollfd array
	 */
	int *pollfd_idx;

	/*
	 * Cache for s3_event_loop_once to avoid reallocs
	 */
	struct pollfd *pfds;
};
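/*
 * The two members above work as a pair: pollfd_idx is indexed by file
 * descriptor number and holds that fd's slot in the pfds array handed to
 * poll(), or -1 when the fd has no slot yet. As an illustration (not code
 * from the original file), with read interest on fds 5 and 9 one possible
 * layout is:
 *
 *	pollfd_idx[5] == 0,  pfds[0].fd == 5, pfds[0].events == POLLIN|POLLHUP
 *	pollfd_idx[9] == 1,  pfds[1].fd == 9, pfds[1].events == POLLIN|POLLHUP
 *	pollfd_idx[i] == -1  for every other i
 */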
static struct tevent_poll_private *tevent_get_poll_private(
	struct tevent_context *ev)
{
	struct tevent_poll_private *state;

	state = (struct tevent_poll_private *)ev->additional_data;
	if (state == NULL) {
		state = TALLOC_ZERO_P(ev, struct tevent_poll_private);
		ev->additional_data = (void *)state;
		if (state == NULL) {
			DEBUG(10, ("talloc failed\n"));
		}
	}
	return state;
}
static void count_fds(struct tevent_context *ev,
		      int *pnum_fds, int *pmax_fd)
{
	struct tevent_fd *fde;
	int num_fds = 0;
	int max_fd = 0;

	for (fde = ev->fd_events; fde != NULL; fde = fde->next) {
		if (fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) {
			num_fds += 1;
			if (fde->fd > max_fd) {
				max_fd = fde->fd;
			}
		}
	}
	*pnum_fds = num_fds;
	*pmax_fd = max_fd;
}
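/*
 * count_fds() only counts fd events that currently have read or write
 * interest and reports the highest such fd; event_add_to_poll_args() below
 * uses the pair to size the pollfd array and the pollfd_idx map
 * (max_fd + 1 entries).
 */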
bool event_add_to_poll_args(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
			    struct pollfd **pfds, int *pnum_pfds,
			    int *ptimeout)
{
	struct tevent_poll_private *state;
	struct tevent_fd *fde;
	int i, num_fds, max_fd, num_pollfds, idx_len;
	struct pollfd *fds;
	struct timeval now, diff;
	int timeout;

	state = tevent_get_poll_private(ev);
	if (state == NULL) {
		return false;
	}

	count_fds(ev, &num_fds, &max_fd);

	idx_len = max_fd + 1;

	if (talloc_array_length(state->pollfd_idx) < idx_len) {
		state->pollfd_idx = TALLOC_REALLOC_ARRAY(
			state, state->pollfd_idx, int, idx_len);
		if (state->pollfd_idx == NULL) {
			DEBUG(10, ("talloc_realloc failed\n"));
			return false;
		}
	}

	fds = *pfds;
	num_pollfds = *pnum_pfds;

	/*
	 * The +1 is for the sys_poll calling convention. It expects
	 * an array 1 longer for the signal pipe
	 */

	if (talloc_array_length(fds) < num_pollfds + num_fds + 1) {
		fds = TALLOC_REALLOC_ARRAY(mem_ctx, fds, struct pollfd,
					   num_pollfds + num_fds + 1);
		if (fds == NULL) {
			DEBUG(10, ("talloc_realloc failed\n"));
			return false;
		}
	}

	memset(&fds[num_pollfds], 0, sizeof(struct pollfd) * num_fds);

	/*
	 * This needs tuning. We need to cope with multiple fde's for a file
	 * descriptor. The problem is that we need to re-use pollfd_idx across
	 * calls for efficiency. One way would be a direct bitmask that might
	 * be initialized quicker, but our bitmap_init implementation is
	 * pretty heavy-weight as well.
	 */
	for (i = 0; i < idx_len; i++) {
		state->pollfd_idx[i] = -1;
	}

	for (fde = ev->fd_events; fde; fde = fde->next) {
		struct pollfd *pfd;

		if ((fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) == 0) {
			continue;
		}

		if (state->pollfd_idx[fde->fd] == -1) {
			/*
			 * We haven't seen this fd yet. Allocate a new pollfd.
			 */
			state->pollfd_idx[fde->fd] = num_pollfds;
			pfd = &fds[num_pollfds];
			num_pollfds += 1;
		} else {
			/*
			 * We have already seen this fd. OR in the flags.
			 */
			pfd = &fds[state->pollfd_idx[fde->fd]];
		}

		pfd->fd = fde->fd;

		if (fde->flags & EVENT_FD_READ) {
			pfd->events |= (POLLIN|POLLHUP);
		}
		if (fde->flags & EVENT_FD_WRITE) {
			pfd->events |= POLLOUT;
		}
	}

	*pfds = fds;
	*pnum_pfds = num_pollfds;

	if (ev->immediate_events != NULL) {
		*ptimeout = 0;
		return true;
	}
	if (ev->timer_events == NULL) {
		*ptimeout = MIN(*ptimeout, INT_MAX);
		return true;
	}

	now = timeval_current();
	diff = timeval_until(&now, &ev->timer_events->next_event);
	timeout = timeval_to_msec(diff);

	if (timeout < *ptimeout) {
		*ptimeout = timeout;
	}

	return true;
}
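/*
 * Typical calling sequence (this mirrors what s3_event_loop_once() below
 * does; shown here only as an illustrative sketch, with "ev" and "mem_ctx"
 * assumed to exist and error handling abbreviated):
 *
 *	struct pollfd *pfds = NULL;
 *	int num_pfds = 0;
 *	int timeout = INT_MAX;
 *	int ret;
 *
 *	if (!event_add_to_poll_args(ev, mem_ctx, &pfds, &num_pfds, &timeout)) {
 *		...handle allocation failure...
 *	}
 *	ret = sys_poll(pfds, num_pfds, timeout);
 *	...check ret and errno...
 *	run_events_poll(ev, ret, pfds, num_pfds);
 *
 * The "+ 1" in the reallocation above exists because sys_poll() expects one
 * spare slot at the end of the array for its internal signal pipe.
 */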
bool run_events_poll(struct tevent_context *ev, int pollrtn,
		     struct pollfd *pfds, int num_pfds)
{
	struct tevent_poll_private *state;
	int *pollfd_idx;
	struct tevent_fd *fde;
	struct timeval now;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return true;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return true;
	}

	now = timeval_current();

	if ((ev->timer_events != NULL)
	    && (timeval_compare(&now, &ev->timer_events->next_event) >= 0)) {
		/* this older events system did not auto-free timed
		   events on running them, and had a race condition
		   where the event could be called twice if the
		   talloc_free of the te happened after the callback
		   made a call which invoked the event loop. To avoid
		   this while still allowing old code which frees the
		   te, we need to create a temporary context which
		   will be used to ensure the te is freed. We also
		   remove the te from the timed event list before we
		   call the handler, to ensure we can't loop */

		struct tevent_timer *te = ev->timer_events;
		TALLOC_CTX *tmp_ctx = talloc_new(ev);

		DEBUG(10, ("Running timed event \"%s\" %p\n",
			   ev->timer_events->handler_name, ev->timer_events));

		DLIST_REMOVE(ev->timer_events, te);
		talloc_steal(tmp_ctx, te);

		te->handler(ev, te, now, te->private_data);

		talloc_free(tmp_ctx);
		return true;
	}

	if (pollrtn <= 0) {
		/* No fd ready */
		return false;
	}

	state = (struct tevent_poll_private *)ev->additional_data;
	pollfd_idx = state->pollfd_idx;

	for (fde = ev->fd_events; fde; fde = fde->next) {
		struct pollfd *pfd;
		uint16_t flags = 0;

		if ((fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) == 0) {
			continue;
		}

		if (pollfd_idx[fde->fd] >= num_pfds) {
			DEBUG(1, ("internal error: pollfd_idx[fde->fd] (%d) "
				  ">= num_pfds (%d)\n", pollfd_idx[fde->fd],
				  num_pfds));
			return false;
		}

		pfd = &pfds[pollfd_idx[fde->fd]];

		if (pfd->fd != fde->fd) {
			DEBUG(1, ("internal error: pfd->fd (%d) "
				  "!= fde->fd (%d)\n", pfd->fd,
				  fde->fd));
			return false;
		}

		if (pfd->revents & (POLLHUP|POLLERR)) {
			/* If we only wait for EVENT_FD_WRITE, we
			   should not tell the event handler about it,
			   and remove the writable flag, as we only
			   report errors when waiting for read events
			   to match the select behavior. */
			if (!(fde->flags & EVENT_FD_READ)) {
				EVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			flags |= EVENT_FD_READ;
		}

		if (pfd->revents & POLLIN) {
			flags |= EVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= EVENT_FD_WRITE;
		}
		if (flags & fde->flags) {
			DLIST_DEMOTE(ev->fd_events, fde, struct tevent_fd);
			fde->handler(ev, fde, flags, fde->private_data);
			return true;
		}
	}

	return false;
}
struct timeval *get_timed_events_timeout(struct tevent_context *ev,
					 struct timeval *to_ret)
{
	struct timeval now;

	if ((ev->timer_events == NULL) && (ev->immediate_events == NULL)) {
		return NULL;
	}
	if (ev->immediate_events != NULL) {
		*to_ret = timeval_zero();
		return to_ret;
	}

	now = timeval_current();
	*to_ret = timeval_until(&now, &ev->timer_events->next_event);

	DEBUG(10, ("timed_events_timeout: %d/%d\n", (int)to_ret->tv_sec,
		   (int)to_ret->tv_usec));

	return to_ret;
}
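/*
 * Illustrative sketch only (not part of the original file): a caller that
 * still runs its own select() loop can use get_timed_events_timeout() to
 * bound the wait so that timed and immediate events are not delayed. The
 * fd bookkeeping is assumed and abbreviated here:
 *
 *	struct timeval tv;
 *	struct timeval *tvp = get_timed_events_timeout(ev, &tv);
 *	fd_set r_fds;
 *
 *	FD_ZERO(&r_fds);
 *	...FD_SET() the caller's own descriptors...
 *
 *	if (select(maxfd + 1, &r_fds, NULL, NULL, tvp) == -1 &&
 *	    errno != EINTR) {
 *		...handle the error...
 *	}
 *	...dispatch whatever became ready or due...
 */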
static int s3_event_loop_once(struct tevent_context *ev, const char *location)
{
	struct tevent_poll_private *state;
	int timeout;
	int num_pfds;
	int ret;

	timeout = INT_MAX;

	state = tevent_get_poll_private(ev);
	if (state == NULL) {
		return -1;
	}

	if (run_events_poll(ev, 0, NULL, 0)) {
		return 0;
	}

	num_pfds = 0;
	if (!event_add_to_poll_args(ev, state,
				    &state->pfds, &num_pfds, &timeout)) {
		return -1;
	}

	ret = sys_poll(state->pfds, num_pfds, timeout);
	if (ret == -1 && errno != EINTR) {
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "poll() failed: %d:%s\n",
			     errno, strerror(errno));
		return -1;
	}

	run_events_poll(ev, ret, state->pfds, num_pfds);
	return 0;
}
static int s3_event_context_init(struct tevent_context *ev)
{
	return 0;
}
void dump_event_list(struct tevent_context *ev)
{
	struct tevent_timer *te;
	struct tevent_fd *fe;
	struct timeval evt, now;

	now = timeval_current();

	DEBUG(10,("dump_event_list:\n"));

	for (te = ev->timer_events; te; te = te->next) {

		evt = timeval_until(&now, &te->next_event);

		DEBUGADD(10,("Timed Event \"%s\" %p handled in %d seconds (at %s)\n",
			     te->handler_name,
			     te,
			     (int)evt.tv_sec,
			     http_timestring(talloc_tos(), te->next_event.tv_sec)));
	}

	for (fe = ev->fd_events; fe; fe = fe->next) {

		DEBUGADD(10,("FD Event %d %p, flags: 0x%04x\n",
			     fe->fd, fe, fe->flags));
	}
}
static const struct tevent_ops s3_event_ops = {
	.context_init		= s3_event_context_init,
	.add_fd			= tevent_common_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= tevent_common_fd_set_flags,
	.add_timer		= tevent_common_add_timer,
	.schedule_immediate	= tevent_common_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= s3_event_loop_once,
	.loop_wait		= tevent_common_loop_wait,
};
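/*
 * The "s3" backend reuses the generic tevent_common_* implementations for
 * all fd, timer, immediate and signal bookkeeping; the only piece it
 * supplies itself is the poll(2) based s3_event_loop_once() above.
 */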
static bool s3_tevent_init(void)
{
	static bool initialized;
	if (initialized) {
		return true;
	}
	initialized = tevent_register_backend("s3", &s3_event_ops);
	tevent_set_default_backend("s3");
	return initialized;
}
/*
 * This is used to catch debug messages from the events system and pass
 * them on to the Samba DEBUG() macros at a matching debug level.
 */
static void s3_event_debug(void *context, enum tevent_debug_level level,
			   const char *fmt, va_list ap) PRINTF_ATTRIBUTE(3,0);

static void s3_event_debug(void *context, enum tevent_debug_level level,
			   const char *fmt, va_list ap)
{
	int samba_level = -1;
	char *s = NULL;

	switch (level) {
	case TEVENT_DEBUG_FATAL:
		samba_level = 0;
		break;
	case TEVENT_DEBUG_ERROR:
		samba_level = 1;
		break;
	case TEVENT_DEBUG_WARNING:
		samba_level = 2;
		break;
	case TEVENT_DEBUG_TRACE:
		samba_level = 11;
		break;
	}

	if (vasprintf(&s, fmt, ap) == -1) {
		return;
	}
	DEBUG(samba_level, ("s3_event: %s", s));
	free(s);
}
struct tevent_context *s3_tevent_context_init(TALLOC_CTX *mem_ctx)
{
	struct tevent_context *ev;

	s3_tevent_init();

	ev = tevent_context_init_byname(mem_ctx, "s3");
	if (ev) {
		tevent_set_debug(ev, s3_event_debug, NULL);
	}
	return ev;
}
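/*
 * Illustrative usage sketch (assumption: the standard tevent/talloc API;
 * the surrounding code is invented for the example):
 *
 *	TALLOC_CTX *frame = talloc_stackframe();
 *	struct tevent_context *ev = s3_tevent_context_init(frame);
 *
 *	if (ev != NULL) {
 *		...register fd, timer or immediate events on ev...
 *		tevent_loop_once(ev);	(ends up in s3_event_loop_once)
 *	}
 *	TALLOC_FREE(frame);
 */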