/*
   Unix SMB/CIFS implementation.
   Timed event library.
   Copyright (C) Andrew Tridgell 1992-1998
   Copyright (C) Volker Lendecke 2005-2007

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "lib/tevent/tevent_internal.h"
#include "../lib/util/select.h"
#include "system/select.h"

struct tevent_poll_private {
        /* Index from file descriptor into the pollfd array */
        int *pollfd_idx;

        /* Cache for s3_event_loop_once to avoid reallocs */
        struct pollfd *pfds;
};
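
/*
 * Fetch the poll state hung off ev->additional_data, creating it on
 * first use. Returns NULL (after logging a debug message) if the
 * allocation fails.
 */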

static struct tevent_poll_private *tevent_get_poll_private(
        struct tevent_context *ev)
{
        struct tevent_poll_private *state;

        state = (struct tevent_poll_private *)ev->additional_data;
        if (state == NULL) {
                state = talloc_zero(ev, struct tevent_poll_private);
                ev->additional_data = (void *)state;
                if (state == NULL) {
                        DEBUG(10, ("talloc failed\n"));
                }
        }
        return state;
}
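
/*
 * Count the fd events that currently want read or write, and the
 * highest file descriptor among them, so the caller can size the
 * pollfd array and the fd-to-pollfd index.
 */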

static void count_fds(struct tevent_context *ev,
                      int *pnum_fds, int *pmax_fd)
{
        struct tevent_fd *fde;
        int num_fds = 0;
        int max_fd = 0;

        for (fde = ev->fd_events; fde != NULL; fde = fde->next) {
                if (fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) {
                        num_fds += 1;
                        if (fde->fd > max_fd) {
                                max_fd = fde->fd;
                        }
                }
        }
        *pnum_fds = num_fds;
        *pmax_fd = max_fd;
}
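
/*
 * Append pollfds for all active fd events to *pfds, growing the talloc
 * array as needed; multiple fd events on the same descriptor share one
 * pollfd via state->pollfd_idx. Also lower *ptimeout to the next timer
 * or immediate deadline in milliseconds. See s3_event_loop_once below
 * for a caller.
 */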

bool event_add_to_poll_args(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
                            struct pollfd **pfds, int *pnum_pfds,
                            int *ptimeout)
{
        struct tevent_poll_private *state;
        struct tevent_fd *fde;
        int i, num_fds, max_fd, num_pollfds, idx_len;
        struct pollfd *fds;
        struct timeval now, diff;
        int timeout;

        state = tevent_get_poll_private(ev);
        if (state == NULL) {
                return false;
        }

        count_fds(ev, &num_fds, &max_fd);

        idx_len = max_fd+1;

        if (talloc_array_length(state->pollfd_idx) < idx_len) {
                state->pollfd_idx = talloc_realloc(
                        state, state->pollfd_idx, int, idx_len);
                if (state->pollfd_idx == NULL) {
                        DEBUG(10, ("talloc_realloc failed\n"));
                        return false;
                }
        }

        fds = *pfds;
        num_pollfds = *pnum_pfds;

        if (talloc_array_length(fds) < num_pollfds + num_fds) {
                fds = talloc_realloc(mem_ctx, fds, struct pollfd,
                                     num_pollfds + num_fds);
                if (fds == NULL) {
                        DEBUG(10, ("talloc_realloc failed\n"));
                        return false;
                }
        }

        memset(&fds[num_pollfds], 0, sizeof(struct pollfd) * num_fds);

        /*
         * This needs tuning. We need to cope with multiple fde's for a file
         * descriptor. The problem is that we need to re-use pollfd_idx across
         * calls for efficiency. One way would be a direct bitmask that might
         * be initialized quicker, but our bitmap_init implementation is
         * pretty heavy-weight as well.
         */
        for (i=0; i<idx_len; i++) {
                state->pollfd_idx[i] = -1;
        }

        for (fde = ev->fd_events; fde; fde = fde->next) {
                struct pollfd *pfd;

                if ((fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) == 0) {
                        continue;
                }

                if (state->pollfd_idx[fde->fd] == -1) {
                        /*
                         * We haven't seen this fd yet. Allocate a new pollfd.
                         */
                        state->pollfd_idx[fde->fd] = num_pollfds;
                        pfd = &fds[num_pollfds];
                        num_pollfds += 1;
                } else {
                        /*
                         * We have already seen this fd. OR in the flags.
                         */
                        pfd = &fds[state->pollfd_idx[fde->fd]];
                }

                pfd->fd = fde->fd;

                if (fde->flags & EVENT_FD_READ) {
                        pfd->events |= (POLLIN|POLLHUP);
                }
                if (fde->flags & EVENT_FD_WRITE) {
                        pfd->events |= POLLOUT;
                }
        }

        *pfds = fds;
        *pnum_pfds = num_pollfds;

        if (ev->immediate_events != NULL) {
                *ptimeout = 0;
                return true;
        }
        if (ev->timer_events == NULL) {
                *ptimeout = MIN(*ptimeout, INT_MAX);
                return true;
        }

        now = timeval_current();
        diff = timeval_until(&now, &ev->timer_events->next_event);
        timeout = timeval_to_msec(diff);

        if (timeout < *ptimeout) {
                *ptimeout = timeout;
        }

        return true;
}
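
/*
 * Dispatch at most one piece of pending work: pending signals first,
 * then immediate events, then a single expired timer, then a single
 * ready fd handler from the pollfd array built by
 * event_add_to_poll_args. Returns true if something was dispatched,
 * false if there was nothing to do.
 */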

bool run_events_poll(struct tevent_context *ev, int pollrtn,
                     struct pollfd *pfds, int num_pfds)
{
        struct tevent_poll_private *state;
        int *pollfd_idx;
        struct tevent_fd *fde;
        struct timeval now;

        if (ev->signal_events &&
            tevent_common_check_signal(ev)) {
                return true;
        }

        if (ev->immediate_events &&
            tevent_common_loop_immediate(ev)) {
                return true;
        }

        GetTimeOfDay(&now);

        if ((ev->timer_events != NULL)
            && (timeval_compare(&now, &ev->timer_events->next_event) >= 0)) {
                /* this older events system did not auto-free timed
                   events on running them, and had a race condition
                   where the event could be called twice if the
                   talloc_free of the te happened after the callback
                   made a call which invoked the event loop. To avoid
                   this while still allowing old code which frees the
                   te, we need to create a temporary context which
                   will be used to ensure the te is freed. We also
                   remove the te from the timed event list before we
                   call the handler, to ensure we can't loop */

                struct tevent_timer *te = ev->timer_events;
                TALLOC_CTX *tmp_ctx = talloc_new(ev);

                DEBUG(10, ("Running timed event \"%s\" %p\n",
                           ev->timer_events->handler_name, ev->timer_events));

                DLIST_REMOVE(ev->timer_events, te);
                talloc_steal(tmp_ctx, te);

                te->handler(ev, te, now, te->private_data);

                talloc_free(tmp_ctx);
                return true;
        }

        if (pollrtn <= 0) {
                /*
                 * No fd ready
                 */
                return false;
        }

        state = (struct tevent_poll_private *)ev->additional_data;
        pollfd_idx = state->pollfd_idx;

        for (fde = ev->fd_events; fde; fde = fde->next) {
                struct pollfd *pfd;
                uint16 flags = 0;

                if ((fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE)) == 0) {
                        continue;
                }

                if (pollfd_idx[fde->fd] >= num_pfds) {
                        DEBUG(1, ("internal error: pollfd_idx[fde->fd] (%d) "
                                  ">= num_pfds (%d)\n", pollfd_idx[fde->fd],
                                  num_pfds));
                        return false;
                }
                pfd = &pfds[pollfd_idx[fde->fd]];

                if (pfd->fd != fde->fd) {
                        DEBUG(1, ("internal error: pfd->fd (%d) "
                                  "!= fde->fd (%d)\n", pfd->fd,
                                  fde->fd));
                        return false;
                }

                if (pfd->revents & (POLLHUP|POLLERR)) {
                        /* If we only wait for EVENT_FD_WRITE, we
                           should not tell the event handler about it,
                           and remove the writable flag, as we only
                           report errors when waiting for read events
                           to match the select behavior. */
                        if (!(fde->flags & EVENT_FD_READ)) {
                                EVENT_FD_NOT_WRITEABLE(fde);
                                continue;
                        }
                        flags |= EVENT_FD_READ;
                }

                if (pfd->revents & POLLIN) {
                        flags |= EVENT_FD_READ;
                }
                if (pfd->revents & POLLOUT) {
                        flags |= EVENT_FD_WRITE;
                }
                if (flags & fde->flags) {
                        DLIST_DEMOTE(ev->fd_events, fde, struct tevent_fd);
                        fde->handler(ev, fde, flags, fde->private_data);
                        return true;
                }
        }

        return false;
}
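
/*
 * Return the time until the next timed event, zero if an immediate
 * event is pending, or NULL if nothing is scheduled, suitable as a
 * select/poll timeout for callers running their own loop.
 */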

struct timeval *get_timed_events_timeout(struct tevent_context *ev,
                                         struct timeval *to_ret)
{
        struct timeval now;

        if ((ev->timer_events == NULL) && (ev->immediate_events == NULL)) {
                return NULL;
        }
        if (ev->immediate_events != NULL) {
                *to_ret = timeval_zero();
                return to_ret;
        }

        now = timeval_current();
        *to_ret = timeval_until(&now, &ev->timer_events->next_event);

        DEBUG(10, ("timed_events_timeout: %d/%d\n", (int)to_ret->tv_sec,
                   (int)to_ret->tv_usec));

        return to_ret;
}
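
/*
 * One iteration of the "s3" backend's event loop: run any work that is
 * already pending, build the pollfd array and timeout, poll(), then
 * dispatch whatever became ready.
 */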

static int s3_event_loop_once(struct tevent_context *ev, const char *location)
{
        struct tevent_poll_private *state;
        int timeout;
        int num_pfds;
        int ret;

        timeout = INT_MAX;

        state = tevent_get_poll_private(ev);
        if (state == NULL) {
                errno = ENOMEM;
                return -1;
        }

        if (run_events_poll(ev, 0, NULL, 0)) {
                return 0;
        }

        num_pfds = 0;
        if (!event_add_to_poll_args(ev, state,
                                    &state->pfds, &num_pfds, &timeout)) {
                return -1;
        }

        ret = poll(state->pfds, num_pfds, timeout);
        if (ret == -1 && errno != EINTR) {
                tevent_debug(ev, TEVENT_DEBUG_FATAL,
                             "poll() failed: %d:%s\n",
                             errno, strerror(errno));
                return -1;
        }
        run_events_poll(ev, ret, state->pfds, num_pfds);
        return 0;
}

static int s3_event_context_init(struct tevent_context *ev)
{
        return 0;
}
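
/*
 * Log all pending timed and fd events at debug level 10, mainly useful
 * when tracking down a stuck event loop.
 */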

void dump_event_list(struct tevent_context *ev)
{
        struct tevent_timer *te;
        struct tevent_fd *fe;
        struct timeval evt, now;

        if (!ev) {
                return;
        }

        now = timeval_current();

        DEBUG(10,("dump_event_list:\n"));

        for (te = ev->timer_events; te; te = te->next) {

                evt = timeval_until(&now, &te->next_event);

                DEBUGADD(10,("Timed Event \"%s\" %p handled in %d seconds (at %s)\n",
                             te->handler_name,
                             te,
                             (int)evt.tv_sec,
                             http_timestring(talloc_tos(), te->next_event.tv_sec)));
        }

        for (fe = ev->fd_events; fe; fe = fe->next) {

                DEBUGADD(10,("FD Event %d %p, flags: 0x%04x\n",
                             fe->fd,
                             fe,
                             fe->flags));
        }
}
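
/*
 * The "s3" backend reuses the generic tevent_common_* implementations
 * for fd, timer, immediate and signal bookkeeping and plugs in its own
 * poll(2)-based loop_once above.
 */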

static const struct tevent_ops s3_event_ops = {
        .context_init           = s3_event_context_init,
        .add_fd                 = tevent_common_add_fd,
        .set_fd_close_fn        = tevent_common_fd_set_close_fn,
        .get_fd_flags           = tevent_common_fd_get_flags,
        .set_fd_flags           = tevent_common_fd_set_flags,
        .add_timer              = tevent_common_add_timer,
        .schedule_immediate     = tevent_common_schedule_immediate,
        .add_signal             = tevent_common_add_signal,
        .loop_once              = s3_event_loop_once,
        .loop_wait              = tevent_common_loop_wait,
};
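
/*
 * Register the "s3" backend with tevent exactly once and make it the
 * process-wide default backend.
 */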

static bool s3_tevent_init(void)
{
        static bool initialized;
        if (initialized) {
                return true;
        }
        initialized = tevent_register_backend("s3", &s3_event_ops);
        tevent_set_default_backend("s3");
        return initialized;
}

/*
  this is used to catch debug messages from events
*/
static void s3_event_debug(void *context, enum tevent_debug_level level,
                           const char *fmt, va_list ap) PRINTF_ATTRIBUTE(3,0);

static void s3_event_debug(void *context, enum tevent_debug_level level,
                           const char *fmt, va_list ap)
{
        int samba_level = -1;
        char *s = NULL;
        switch (level) {
        case TEVENT_DEBUG_FATAL:
                samba_level = 0;
                break;
        case TEVENT_DEBUG_ERROR:
                samba_level = 1;
                break;
        case TEVENT_DEBUG_WARNING:
                samba_level = 2;
                break;
        case TEVENT_DEBUG_TRACE:
                samba_level = 11;
                break;
        }

        if (vasprintf(&s, fmt, ap) == -1) {
                return;
        }
        DEBUG(samba_level, ("s3_event: %s", s));
        free(s);
}
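
/*
 * Create a tevent context that uses the "s3" backend, registering the
 * backend on first use and routing tevent's debug messages into
 * Samba's DEBUG system.
 */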

struct tevent_context *s3_tevent_context_init(TALLOC_CTX *mem_ctx)
{
        struct tevent_context *ev;

        s3_tevent_init();

        ev = tevent_context_init_byname(mem_ctx, "s3");
        if (ev) {
                tevent_set_debug(ev, s3_event_debug, NULL);
        }

        return ev;
}
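
/*
 * An idle event is a timed event that re-arms itself: the handler runs
 * every "interval" until it returns false or until the owner
 * talloc-frees the returned idle_event.
 */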

struct idle_event {
        struct timed_event *te;
        struct timeval interval;
        char *name;
        bool (*handler)(const struct timeval *now, void *private_data);
        void *private_data;
};

static void smbd_idle_event_handler(struct event_context *ctx,
                                    struct timed_event *te,
                                    struct timeval now,
                                    void *private_data)
{
        struct idle_event *event =
                talloc_get_type_abort(private_data, struct idle_event);

        TALLOC_FREE(event->te);

        DEBUG(10,("smbd_idle_event_handler: %s %p called\n",
                  event->name, event->te));

        if (!event->handler(&now, event->private_data)) {
                DEBUG(10,("smbd_idle_event_handler: %s %p stopped\n",
                          event->name, event->te));
                /* Don't repeat, delete ourselves */
                TALLOC_FREE(event);
                return;
        }

        DEBUG(10,("smbd_idle_event_handler: %s %p rescheduled\n",
                  event->name, event->te));

        event->te = event_add_timed(ctx, event,
                                    timeval_sum(&now, &event->interval),
                                    smbd_idle_event_handler, event);

        /* We can't do much but fail here. */
        SMB_ASSERT(event->te != NULL);
}
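
/*
 * Register a periodic handler: it first fires "interval" after
 * registration and keeps firing until it returns false. The returned
 * idle_event is a talloc child of mem_ctx; freeing it cancels the
 * timer. A rough usage sketch (my_housekeeping and the ev/mem_ctx
 * variables are hypothetical, shown only for illustration):
 *
 *   static bool my_housekeeping(const struct timeval *now, void *priv)
 *   {
 *           // ... periodic work ...
 *           return true;    // keep rescheduling
 *   }
 *
 *   event_add_idle(ev, mem_ctx, timeval_set(60, 0), "housekeeping",
 *                  my_housekeeping, NULL);
 */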

struct idle_event *event_add_idle(struct event_context *event_ctx,
                                  TALLOC_CTX *mem_ctx,
                                  struct timeval interval,
                                  const char *name,
                                  bool (*handler)(const struct timeval *now,
                                                  void *private_data),
                                  void *private_data)
{
        struct idle_event *result;
        struct timeval now = timeval_current();

        result = talloc(mem_ctx, struct idle_event);
        if (result == NULL) {
                DEBUG(0, ("talloc failed\n"));
                return NULL;
        }

        result->interval = interval;
        result->handler = handler;
        result->private_data = private_data;

        if (!(result->name = talloc_asprintf(result, "idle_evt(%s)", name))) {
                DEBUG(0, ("talloc failed\n"));
                TALLOC_FREE(result);
                return NULL;
        }

        result->te = event_add_timed(event_ctx, result,
                                     timeval_sum(&now, &interval),
                                     smbd_idle_event_handler, result);
        if (result->te == NULL) {
                DEBUG(0, ("event_add_timed failed\n"));
                TALLOC_FREE(result);
                return NULL;
        }

        DEBUG(10,("event_add_idle: %s %p\n", result->name, result->te));
        return result;
}