[Samba.git] / source3 / lib / events.c
/*
   Unix SMB/CIFS implementation.
   Timed event library.
   Copyright (C) Andrew Tridgell 1992-1998
   Copyright (C) Volker Lendecke 2005-2007

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "lib/tevent/tevent_internal.h"
#include "../lib/util/select.h"
#include "system/select.h"

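/*
 * Per-context private state for the poll()-based "s3" backend, hung off
 * tevent_context->additional_data.
 */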
struct tevent_poll_private {
	/* Index from file descriptor into the pollfd array */
	int *pollfd_idx;

	/* Cache for s3_event_loop_once to avoid reallocs */
	struct pollfd *pfds;
};

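/*
 * Fetch the poll private state from the event context, allocating it on
 * first use. Returns NULL if the allocation fails.
 */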
static struct tevent_poll_private *tevent_get_poll_private(
	struct tevent_context *ev)
{
	struct tevent_poll_private *state;

	state = (struct tevent_poll_private *)ev->additional_data;
	if (state == NULL) {
		state = talloc_zero(ev, struct tevent_poll_private);
		ev->additional_data = (void *)state;
		if (state == NULL) {
			DEBUG(10, ("talloc failed\n"));
		}
	}
	return state;
}

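/*
 * Count the fd events that are interested in reading or writing and
 * report the highest file descriptor among them.
 */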
static void count_fds(struct tevent_context *ev,
		      int *pnum_fds, int *pmax_fd)
{
	struct tevent_fd *fde;
	int num_fds = 0;
	int max_fd = 0;

	for (fde = ev->fd_events; fde != NULL; fde = fde->next) {
		if (fde->flags & (TEVENT_FD_READ|TEVENT_FD_WRITE)) {
			num_fds += 1;
			if (fde->fd > max_fd) {
				max_fd = fde->fd;
			}
		}
	}
	*pnum_fds = num_fds;
	*pmax_fd = max_fd;
}

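/*
 * Merge this event context's fd events into the caller's pollfd array
 * and lower *ptimeout to the delay until the next timer or immediate
 * event. Multiple fd events on the same descriptor are collapsed into a
 * single pollfd entry via the pollfd_idx map.
 */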
bool event_add_to_poll_args(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
			    struct pollfd **pfds, int *pnum_pfds,
			    int *ptimeout)
{
	struct tevent_poll_private *state;
	struct tevent_fd *fde;
	int i, num_fds, max_fd, num_pollfds, idx_len;
	struct pollfd *fds;
	struct timeval now, diff;
	int timeout;

	state = tevent_get_poll_private(ev);
	if (state == NULL) {
		return false;
	}
	count_fds(ev, &num_fds, &max_fd);

	idx_len = max_fd+1;

	if (talloc_array_length(state->pollfd_idx) < idx_len) {
		state->pollfd_idx = talloc_realloc(
			state, state->pollfd_idx, int, idx_len);
		if (state->pollfd_idx == NULL) {
			DEBUG(10, ("talloc_realloc failed\n"));
			return false;
		}
	}

	fds = *pfds;
	num_pollfds = *pnum_pfds;

	if (talloc_array_length(fds) < num_pollfds + num_fds) {
		fds = talloc_realloc(mem_ctx, fds, struct pollfd,
				     num_pollfds + num_fds);
		if (fds == NULL) {
			DEBUG(10, ("talloc_realloc failed\n"));
			return false;
		}
	}

	memset(&fds[num_pollfds], 0, sizeof(struct pollfd) * num_fds);

	/*
	 * This needs tuning. We need to cope with multiple fde's for a file
	 * descriptor. The problem is that we need to re-use pollfd_idx across
	 * calls for efficiency. One way would be a direct bitmask that might
	 * be initialized quicker, but our bitmap_init implementation is
	 * pretty heavy-weight as well.
	 */
	for (i=0; i<idx_len; i++) {
		state->pollfd_idx[i] = -1;
	}

	for (fde = ev->fd_events; fde; fde = fde->next) {
		struct pollfd *pfd;

		if ((fde->flags & (TEVENT_FD_READ|TEVENT_FD_WRITE)) == 0) {
			continue;
		}

		if (state->pollfd_idx[fde->fd] == -1) {
			/*
			 * We haven't seen this fd yet. Allocate a new pollfd.
			 */
			state->pollfd_idx[fde->fd] = num_pollfds;
			pfd = &fds[num_pollfds];
			num_pollfds += 1;
		} else {
			/*
			 * We have already seen this fd. OR in the flags.
			 */
			pfd = &fds[state->pollfd_idx[fde->fd]];
		}

		pfd->fd = fde->fd;

		if (fde->flags & TEVENT_FD_READ) {
			pfd->events |= (POLLIN|POLLHUP);
		}
		if (fde->flags & TEVENT_FD_WRITE) {
			pfd->events |= POLLOUT;
		}
	}

	*pfds = fds;
	*pnum_pfds = num_pollfds;

	if (ev->immediate_events != NULL) {
		*ptimeout = 0;
		return true;
	}
	if (ev->timer_events == NULL) {
		*ptimeout = MIN(*ptimeout, INT_MAX);
		return true;
	}

	now = timeval_current();
	diff = timeval_until(&now, &ev->timer_events->next_event);
	timeout = timeval_to_msec(diff);

	if (timeout < *ptimeout) {
		*ptimeout = timeout;
	}

	return true;
}

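/*
 * Dispatch one pending event: signal and immediate events first; if no
 * fd is ready, the next expired timer; otherwise the handler of the
 * first fd event that poll() reported as ready. Returns true if a
 * handler was run.
 */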
bool run_events_poll(struct tevent_context *ev, int pollrtn,
		     struct pollfd *pfds, int num_pfds)
{
	struct tevent_poll_private *state;
	int *pollfd_idx;
	struct tevent_fd *fde;

	if (ev->signal_events &&
	    tevent_common_check_signal(ev)) {
		return true;
	}

	if (ev->immediate_events &&
	    tevent_common_loop_immediate(ev)) {
		return true;
	}

	if (pollrtn <= 0) {
		struct timeval tval;

		tval = tevent_common_loop_timer_delay(ev);
		if (tevent_timeval_is_zero(&tval)) {
			return true;
		}

		/*
		 * No fd ready
		 */
		return false;
	}

	state = (struct tevent_poll_private *)ev->additional_data;
	pollfd_idx = state->pollfd_idx;

	for (fde = ev->fd_events; fde; fde = fde->next) {
		struct pollfd *pfd;
		uint16 flags = 0;

		if ((fde->flags & (TEVENT_FD_READ|TEVENT_FD_WRITE)) == 0) {
			continue;
		}

		if (pollfd_idx[fde->fd] >= num_pfds) {
			DEBUG(1, ("internal error: pollfd_idx[fde->fd] (%d) "
				  ">= num_pfds (%d)\n", pollfd_idx[fde->fd],
				  num_pfds));
			return false;
		}
		pfd = &pfds[pollfd_idx[fde->fd]];

		if (pfd->fd != fde->fd) {
			DEBUG(1, ("internal error: pfd->fd (%d) "
				  "!= fde->fd (%d)\n", pfd->fd,
				  fde->fd));
			return false;
		}

		if (pfd->revents & (POLLHUP|POLLERR)) {
			/* If we only wait for TEVENT_FD_WRITE, we
			   should not tell the event handler about it,
			   and remove the writable flag, as we only
			   report errors when waiting for read events
			   to match the select behavior. */
			if (!(fde->flags & TEVENT_FD_READ)) {
				TEVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			flags |= TEVENT_FD_READ;
		}

		if (pfd->revents & POLLIN) {
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= TEVENT_FD_WRITE;
		}
		if (flags & fde->flags) {
			DLIST_DEMOTE(ev->fd_events, fde, struct tevent_fd);
			fde->handler(ev, fde, flags, fde->private_data);
			return true;
		}
	}

	return false;
}

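/*
 * Compute the timeout for the next wait: NULL when neither timers nor
 * immediate events are pending, zero when an immediate event is queued,
 * otherwise the time until the earliest timer fires.
 */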
struct timeval *get_timed_events_timeout(struct tevent_context *ev,
					 struct timeval *to_ret)
{
	struct timeval now;

	if ((ev->timer_events == NULL) && (ev->immediate_events == NULL)) {
		return NULL;
	}
	if (ev->immediate_events != NULL) {
		*to_ret = timeval_zero();
		return to_ret;
	}

	now = timeval_current();
	*to_ret = timeval_until(&now, &ev->timer_events->next_event);

	DEBUG(10, ("timed_events_timeout: %d/%d\n", (int)to_ret->tv_sec,
		   (int)to_ret->tv_usec));

	return to_ret;
}

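/*
 * One iteration of the "s3" event loop: run anything that is already
 * pending, build the pollfd array, poll() with the computed timeout and
 * dispatch the result.
 */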
static int s3_event_loop_once(struct tevent_context *ev, const char *location)
{
	struct tevent_poll_private *state;
	int timeout;
	int num_pfds;
	int ret;
	int poll_errno;

	timeout = INT_MAX;

	state = tevent_get_poll_private(ev);
	if (state == NULL) {
		errno = ENOMEM;
		return -1;
	}

	if (run_events_poll(ev, 0, NULL, 0)) {
		return 0;
	}

	num_pfds = 0;
	if (!event_add_to_poll_args(ev, state,
				    &state->pfds, &num_pfds, &timeout)) {
		return -1;
	}

	tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_WAIT);
	ret = poll(state->pfds, num_pfds, timeout);
	poll_errno = errno;
	tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_WAIT);
	errno = poll_errno;

	if (ret == -1 && errno != EINTR) {
		tevent_debug(ev, TEVENT_DEBUG_FATAL,
			     "poll() failed: %d:%s\n",
			     errno, strerror(errno));
		return -1;
	}

	run_events_poll(ev, ret, state->pfds, num_pfds);
	return 0;
}

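/*
 * Nothing backend-specific to set up here; the poll private state is
 * created lazily in tevent_get_poll_private().
 */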
static int s3_event_context_init(struct tevent_context *ev)
{
	return 0;
}

void dump_event_list(struct tevent_context *ev)
{
	struct tevent_timer *te;
	struct tevent_fd *fe;
	struct timeval evt, now;

	if (!ev) {
		return;
	}

	now = timeval_current();

	DEBUG(10,("dump_event_list:\n"));

	for (te = ev->timer_events; te; te = te->next) {
		evt = timeval_until(&now, &te->next_event);

		DEBUGADD(10,("Timed Event \"%s\" %p handled in %d seconds (at %s)\n",
			     te->handler_name,
			     te,
			     (int)evt.tv_sec,
			     http_timestring(talloc_tos(), te->next_event.tv_sec)));
	}

	for (fe = ev->fd_events; fe; fe = fe->next) {
		DEBUGADD(10,("FD Event %d %p, flags: 0x%04x\n",
			     fe->fd,
			     fe,
			     fe->flags));
	}
}

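/*
 * The "s3" tevent backend: the common tevent implementations for
 * everything except loop_once, which is the poll()-based loop above.
 */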
static const struct tevent_ops s3_event_ops = {
	.context_init = s3_event_context_init,
	.add_fd = tevent_common_add_fd,
	.set_fd_close_fn = tevent_common_fd_set_close_fn,
	.get_fd_flags = tevent_common_fd_get_flags,
	.set_fd_flags = tevent_common_fd_set_flags,
	.add_timer = tevent_common_add_timer,
	.schedule_immediate = tevent_common_schedule_immediate,
	.add_signal = tevent_common_add_signal,
	.loop_once = s3_event_loop_once,
	.loop_wait = tevent_common_loop_wait,
};

static bool s3_tevent_init(void)
{
	static bool initialized;
	if (initialized) {
		return true;
	}
	initialized = tevent_register_backend("s3", &s3_event_ops);
	tevent_set_default_backend("s3");
	return initialized;
}

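/*
 * Create a tevent context bound to the "s3" backend, registering the
 * backend on first use.
 *
 * Illustrative sketch only (using talloc_tos() as the parent and the
 * have_work loop condition are assumptions, not taken from this file):
 *
 *	struct tevent_context *ev = s3_tevent_context_init(talloc_tos());
 *	while (have_work) {
 *		tevent_loop_once(ev);
 *	}
 */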
struct tevent_context *s3_tevent_context_init(TALLOC_CTX *mem_ctx)
{
	struct tevent_context *ev;

	s3_tevent_init();

	ev = tevent_context_init_byname(mem_ctx, "s3");
	if (ev) {
		samba_tevent_set_debug(ev, "s3_tevent");
	}
	return ev;
}

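/*
 * A recurring "idle" event: its handler is re-armed every 'interval'
 * until it returns false or the event is talloc_free'd.
 */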
struct idle_event {
	struct tevent_timer *te;
	struct timeval interval;
	char *name;
	bool (*handler)(const struct timeval *now, void *private_data);
	void *private_data;
};

static void smbd_idle_event_handler(struct tevent_context *ctx,
				    struct tevent_timer *te,
				    struct timeval now,
				    void *private_data)
{
	struct idle_event *event =
		talloc_get_type_abort(private_data, struct idle_event);

	TALLOC_FREE(event->te);

	DEBUG(10,("smbd_idle_event_handler: %s %p called\n",
		  event->name, event->te));

	if (!event->handler(&now, event->private_data)) {
		DEBUG(10,("smbd_idle_event_handler: %s %p stopped\n",
			  event->name, event->te));
		/* Don't repeat, delete ourselves */
		TALLOC_FREE(event);
		return;
	}

	DEBUG(10,("smbd_idle_event_handler: %s %p rescheduled\n",
		  event->name, event->te));

	event->te = tevent_add_timer(ctx, event,
				     timeval_sum(&now, &event->interval),
				     smbd_idle_event_handler, event);

	/* We can't do much but fail here. */
	SMB_ASSERT(event->te != NULL);
}

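/*
 * Register a recurring idle event. The handler first runs one
 * 'interval' from now and keeps being rescheduled as long as it
 * returns true; returning false (or freeing the returned pointer)
 * stops it.
 *
 * Hypothetical usage sketch (my_housekeeping is a made-up handler, not
 * part of this file):
 *
 *	event_add_idle(ev, NULL, timeval_set(60, 0), "housekeeping",
 *		       my_housekeeping, NULL);
 */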
struct idle_event *event_add_idle(struct tevent_context *event_ctx,
				  TALLOC_CTX *mem_ctx,
				  struct timeval interval,
				  const char *name,
				  bool (*handler)(const struct timeval *now,
						  void *private_data),
				  void *private_data)
{
	struct idle_event *result;
	struct timeval now = timeval_current();

	result = talloc(mem_ctx, struct idle_event);
	if (result == NULL) {
		DEBUG(0, ("talloc failed\n"));
		return NULL;
	}

	result->interval = interval;
	result->handler = handler;
	result->private_data = private_data;

	if (!(result->name = talloc_asprintf(result, "idle_evt(%s)", name))) {
		DEBUG(0, ("talloc failed\n"));
		TALLOC_FREE(result);
		return NULL;
	}

	result->te = tevent_add_timer(event_ctx, result,
				      timeval_sum(&now, &interval),
				      smbd_idle_event_handler, result);
	if (result->te == NULL) {
		DEBUG(0, ("event_add_timed failed\n"));
		TALLOC_FREE(result);
		return NULL;
	}

	DEBUG(10,("event_add_idle: %s %p\n", result->name, result->te));
	return result;
}