/*
   Unix SMB/CIFS implementation.
   Timed event library.
   Copyright (C) Andrew Tridgell 1992-1998
   Copyright (C) Volker Lendecke 2005

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "lib/tevent/tevent_internal.h"
#include "../lib/util/select.h"
#include "system/select.h"
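
/*
 * Private per-context state for the "s3" poll backend, hung off
 * ev->additional_data.
 */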
struct tevent_poll_private {
	/*
	 * Index from file descriptor into the pollfd array
	 */
	int *pollfd_idx;

	/*
	 * Cache for s3_event_loop_once to avoid reallocs
	 */
	struct pollfd *pfds;
};
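
/*
 * Fetch the poll private state for this event context, allocating it on
 * first use.
 */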
static struct tevent_poll_private *tevent_get_poll_private(
	struct tevent_context *ev)
{
	struct tevent_poll_private *state;

	state = (struct tevent_poll_private *)ev->additional_data;
	if (state == NULL) {
		state = TALLOC_ZERO_P(ev, struct tevent_poll_private);
		ev->additional_data = (void *)state;
		if (state == NULL) {
			DEBUG(10, ("talloc failed\n"));
		}
	}
	return state;
}
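
/*
 * Count the fd events that are interested in read or write and remember the
 * highest fd number seen.
 */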
static void count_fds(struct tevent_context *ev,
		      int *pnum_fds, int *pmax_fd)
{
	struct tevent_fd *fde;
	int num_fds = 0;
	int max_fd = 0;

	for (fde = ev->fd_events; fde != NULL; fde = fde->next) {
		if (fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) {
			num_fds += 1;
			if (fde->fd > max_fd) {
				max_fd = fde->fd;
			}
		}
	}
	*pnum_fds = num_fds;
	*pmax_fd = max_fd;
}
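
/*
 * Merge this context's fd events into the caller's pollfd array, growing it
 * as needed, and lower *ptimeout to the next timed or immediate event.
 */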
bool event_add_to_poll_args(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
			    struct pollfd **pfds, int *pnum_pfds,
			    int *ptimeout)
{
	struct tevent_poll_private *state;
	struct tevent_fd *fde;
	int i, num_fds, max_fd, num_pollfds, idx_len;
	struct pollfd *fds;
	struct timeval now, diff;
	int timeout;

	state = tevent_get_poll_private(ev);
	if (state == NULL) {
		return false;
	}

	count_fds(ev, &num_fds, &max_fd);

	idx_len = max_fd+1;

	if (talloc_array_length(state->pollfd_idx) < idx_len) {
		state->pollfd_idx = TALLOC_REALLOC_ARRAY(
			state, state->pollfd_idx, int, idx_len);
		if (state->pollfd_idx == NULL) {
			DEBUG(10, ("talloc_realloc failed\n"));
			return false;
		}
	}

	fds = *pfds;
	num_pollfds = *pnum_pfds;

	/*
	 * The +1 is for the sys_poll calling convention. It expects
	 * an array 1 longer for the signal pipe
	 */

	if (talloc_array_length(fds) < num_pollfds + num_fds + 1) {
		fds = TALLOC_REALLOC_ARRAY(mem_ctx, fds, struct pollfd,
					   num_pollfds + num_fds + 1);
		if (fds == NULL) {
			DEBUG(10, ("talloc_realloc failed\n"));
			return false;
		}
	}

	memset(&fds[num_pollfds], 0, sizeof(struct pollfd) * num_fds);

	/*
	 * This needs tuning. We need to cope with multiple fde's for a file
	 * descriptor. The problem is that we need to re-use pollfd_idx across
	 * calls for efficiency. One way would be a direct bitmask that might
	 * be initialized quicker, but our bitmap_init implementation is
	 * pretty heavy-weight as well.
	 */
	for (i=0; i<idx_len; i++) {
		state->pollfd_idx[i] = -1;
	}

	for (fde = ev->fd_events; fde; fde = fde->next) {
		struct pollfd *pfd;

		if ((fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) == 0) {
			continue;
		}

		if (state->pollfd_idx[fde->fd] == -1) {
			/*
			 * We haven't seen this fd yet. Allocate a new pollfd.
			 */
			state->pollfd_idx[fde->fd] = num_pollfds;
			pfd = &fds[num_pollfds];
			num_pollfds += 1;
		} else {
			/*
			 * We have already seen this fd. OR in the flags.
			 */
			pfd = &fds[state->pollfd_idx[fde->fd]];
		}

		pfd->fd = fde->fd;

		if (fde->flags & EVENT_FD_READ) {
			pfd->events |= (POLLIN|POLLHUP);
		}
		if (fde->flags & EVENT_FD_WRITE) {
			pfd->events |= POLLOUT;
		}
	}

	*pfds = fds;
	*pnum_pfds = num_pollfds;

	if (ev->immediate_events != NULL) {
		*ptimeout = 0;
		return true;
	}
	if (ev->timer_events == NULL) {
		*ptimeout = MIN(*ptimeout, INT_MAX);
		return true;
	}

	now = timeval_current();
	diff = timeval_until(&now, &ev->timer_events->next_event);
	timeout = timeval_to_msec(diff);

	if (timeout < *ptimeout) {
		*ptimeout = timeout;
	}

	return true;
}
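
/*
 * Run one pending event: signals first, then immediates, then an expired
 * timer, then a single ready fd from the pollfd array. Returns true if a
 * handler was invoked.
 */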
bool run_events_poll(struct tevent_context *ev, int pollrtn,
		     struct pollfd *pfds, int num_pfds)
{
	struct tevent_poll_private *state;
	int *pollfd_idx;
	struct tevent_fd *fde;
	struct timeval now;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return true;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return true;
	}

	GetTimeOfDay(&now);

	if ((ev->timer_events != NULL)
	    && (timeval_compare(&now, &ev->timer_events->next_event) >= 0)) {
		/* this older events system did not auto-free timed
		   events on running them, and had a race condition
		   where the event could be called twice if the
		   talloc_free of the te happened after the callback
		   made a call which invoked the event loop. To avoid
		   this while still allowing old code which frees the
		   te, we need to create a temporary context which
		   will be used to ensure the te is freed. We also
		   remove the te from the timed event list before we
		   call the handler, to ensure we can't loop */

		struct tevent_timer *te = ev->timer_events;
		TALLOC_CTX *tmp_ctx = talloc_new(ev);

		DEBUG(10, ("Running timed event \"%s\" %p\n",
			   ev->timer_events->handler_name, ev->timer_events));

		DLIST_REMOVE(ev->timer_events, te);
		talloc_steal(tmp_ctx, te);

		te->handler(ev, te, now, te->private_data);

		talloc_free(tmp_ctx);
		return true;
	}

	if (pollrtn <= 0) {
		/*
		 * No fd ready
		 */
		return false;
	}

	state = (struct tevent_poll_private *)ev->additional_data;
	pollfd_idx = state->pollfd_idx;

	for (fde = ev->fd_events; fde; fde = fde->next) {
		struct pollfd *pfd;
		uint16 flags = 0;

		if ((fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) == 0) {
			continue;
		}

		if (pollfd_idx[fde->fd] >= num_pfds) {
			DEBUG(1, ("internal error: pollfd_idx[fde->fd] (%d) "
				  ">= num_pfds (%d)\n", pollfd_idx[fde->fd],
				  num_pfds));
			return false;
		}
		pfd = &pfds[pollfd_idx[fde->fd]];

		if (pfd->fd != fde->fd) {
			DEBUG(1, ("internal error: pfd->fd (%d) "
				  "!= fde->fd (%d)\n", pfd->fd, fde->fd));
			return false;
		}

		if (pfd->revents & (POLLHUP|POLLERR)) {
			/* If we only wait for EVENT_FD_WRITE, we
			   should not tell the event handler about it,
			   and remove the writable flag, as we only
			   report errors when waiting for read events
			   to match the select behavior. */
			if (!(fde->flags & EVENT_FD_READ)) {
				EVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			flags |= EVENT_FD_READ;
		}

		if (pfd->revents & POLLIN) {
			flags |= EVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= EVENT_FD_WRITE;
		}
		if (flags & fde->flags) {
			DLIST_DEMOTE(ev->fd_events, fde, struct tevent_fd);
			fde->handler(ev, fde, flags, fde->private_data);
			return true;
		}
	}

	return false;
}
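
/*
 * Return the time until the next timed event (zero if an immediate event is
 * pending), or NULL if nothing is scheduled.
 */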
struct timeval *get_timed_events_timeout(struct tevent_context *ev,
					 struct timeval *to_ret)
{
	struct timeval now;

	if ((ev->timer_events == NULL) && (ev->immediate_events == NULL)) {
		return NULL;
	}
	if (ev->immediate_events != NULL) {
		*to_ret = timeval_zero();
		return to_ret;
	}

	now = timeval_current();
	*to_ret = timeval_until(&now, &ev->timer_events->next_event);

	DEBUG(10, ("timed_events_timeout: %d/%d\n", (int)to_ret->tv_sec,
		   (int)to_ret->tv_usec));

	return to_ret;
}
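
/*
 * loop_once for the "s3" backend: dispatch anything already pending,
 * otherwise build the pollfd array, call sys_poll() and run what became
 * ready.
 */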
static int s3_event_loop_once(struct tevent_context *ev, const char *location)
{
	struct tevent_poll_private *state;
	int timeout;
	int num_pfds;
	int ret;

	timeout = INT_MAX;

	state = tevent_get_poll_private(ev);
	if (state == NULL) {
		errno = ENOMEM;
		return -1;
	}

	if (run_events_poll(ev, 0, NULL, 0)) {
		return 0;
	}

	num_pfds = 0;
	if (!event_add_to_poll_args(ev, state,
				    &state->pfds, &num_pfds, &timeout)) {
		return -1;
	}

	ret = sys_poll(state->pfds, num_pfds, timeout);
	if (ret == -1 && errno != EINTR) {
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "poll() failed: %d:%s\n",
			     errno, strerror(errno));
		return -1;
	}

	run_events_poll(ev, ret, state->pfds, num_pfds);
	return 0;
}
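
/*
 * No per-context setup is needed for this backend.
 */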
static int s3_event_context_init(struct tevent_context *ev)
{
	return 0;
}
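
/*
 * Debug helper: log all pending timed and fd events at debug level 10.
 */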
void dump_event_list(struct tevent_context *ev)
{
	struct tevent_timer *te;
	struct tevent_fd *fe;
	struct timeval evt, now;

	if (!ev) {
		return;
	}

	now = timeval_current();

	DEBUG(10,("dump_event_list:\n"));

	for (te = ev->timer_events; te; te = te->next) {

		evt = timeval_until(&now, &te->next_event);

		DEBUGADD(10,("Timed Event \"%s\" %p handled in %d seconds (at %s)\n",
			     te->handler_name,
			     te,
			     (int)evt.tv_sec,
			     http_timestring(talloc_tos(), te->next_event.tv_sec)));
	}

	for (fe = ev->fd_events; fe; fe = fe->next) {

		DEBUGADD(10,("FD Event %d %p, flags: 0x%04x\n",
			     fe->fd,
			     fe,
			     fe->flags));
	}
}
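
/*
 * Operations table for the "s3" backend; apart from context init and
 * loop_once everything is handled by the tevent_common_* helpers.
 */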
static const struct tevent_ops s3_event_ops = {
	.context_init		= s3_event_context_init,
	.add_fd			= tevent_common_add_fd,
	.set_fd_close_fn	= tevent_common_fd_set_close_fn,
	.get_fd_flags		= tevent_common_fd_get_flags,
	.set_fd_flags		= tevent_common_fd_set_flags,
	.add_timer		= tevent_common_add_timer,
	.schedule_immediate	= tevent_common_schedule_immediate,
	.add_signal		= tevent_common_add_signal,
	.loop_once		= s3_event_loop_once,
	.loop_wait		= tevent_common_loop_wait,
};
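
/*
 * Register the "s3" backend once and make it the default.
 */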
static bool s3_tevent_init(void)
{
	static bool initialized;
	if (initialized) {
		return true;
	}
	initialized = tevent_register_backend("s3", &s3_event_ops);
	tevent_set_default_backend("s3");
	return initialized;
}

/*
  this is used to catch debug messages from events
*/
static void s3_event_debug(void *context, enum tevent_debug_level level,
			   const char *fmt, va_list ap) PRINTF_ATTRIBUTE(3,0);

static void s3_event_debug(void *context, enum tevent_debug_level level,
			   const char *fmt, va_list ap)
{
	int samba_level = -1;
	char *s = NULL;
	switch (level) {
	case TEVENT_DEBUG_FATAL:
		samba_level = 0;
		break;
	case TEVENT_DEBUG_ERROR:
		samba_level = 1;
		break;
	case TEVENT_DEBUG_WARNING:
		samba_level = 2;
		break;
	case TEVENT_DEBUG_TRACE:
		samba_level = 11;
		break;
	}

	if (vasprintf(&s, fmt, ap) == -1) {
		return;
	}
	DEBUG(samba_level, ("s3_event: %s", s));
	free(s);
}
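
/*
 * Create a tevent context on the "s3" backend and route its debug messages
 * through DEBUG() via s3_event_debug.
 */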
struct tevent_context *s3_tevent_context_init(TALLOC_CTX *mem_ctx)
{
	struct tevent_context *ev;

	s3_tevent_init();

	ev = tevent_context_init_byname(mem_ctx, "s3");
	if (ev) {
		tevent_set_debug(ev, s3_event_debug, NULL);
	}
	return ev;
}