s3: server: s3_tevent_context_init() -> samba_tevent_context_init()
[Samba.git] / source3 / lib / events.c
bloba866ef5dd4a5f06e9e6a060d25eca69ed64deebb
/*
   Unix SMB/CIFS implementation.
   Timed event library.
   Copyright (C) Andrew Tridgell 1992-1998
   Copyright (C) Volker Lendecke 2005-2007

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
21 #include "includes.h"
22 #include "lib/tevent/tevent_internal.h"
23 #include "../lib/util/select.h"
24 #include "system/select.h"
/*
 * Per-tevent-context poll state, hung off ev->additional_data.
 */
struct tevent_poll_private {
	/*
	 * Maps a file descriptor to its slot in the pollfd array.
	 */
	int *pollfd_idx;

	/*
	 * Cached pollfd array so s3_event_loop_once avoids reallocs.
	 */
	struct pollfd *pfds;
};
38 static struct tevent_poll_private *tevent_get_poll_private(
39 struct tevent_context *ev)
41 struct tevent_poll_private *state;
43 state = (struct tevent_poll_private *)ev->additional_data;
44 if (state == NULL) {
45 state = talloc_zero(ev, struct tevent_poll_private);
46 ev->additional_data = (void *)state;
47 if (state == NULL) {
48 DEBUG(10, ("talloc failed\n"));
51 return state;
54 static void count_fds(struct tevent_context *ev,
55 int *pnum_fds, int *pmax_fd)
57 struct tevent_fd *fde;
58 int num_fds = 0;
59 int max_fd = 0;
61 for (fde = ev->fd_events; fde != NULL; fde = fde->next) {
62 if (fde->flags & (TEVENT_FD_READ|TEVENT_FD_WRITE)) {
63 num_fds += 1;
64 if (fde->fd > max_fd) {
65 max_fd = fde->fd;
69 *pnum_fds = num_fds;
70 *pmax_fd = max_fd;
73 bool event_add_to_poll_args(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
74 struct pollfd **pfds, int *pnum_pfds,
75 int *ptimeout)
77 struct tevent_poll_private *state;
78 struct tevent_fd *fde;
79 int i, num_fds, max_fd, num_pollfds, idx_len;
80 struct pollfd *fds;
81 struct timeval now, diff;
82 int timeout;
84 state = tevent_get_poll_private(ev);
85 if (state == NULL) {
86 return false;
88 count_fds(ev, &num_fds, &max_fd);
90 idx_len = max_fd+1;
92 if (talloc_array_length(state->pollfd_idx) < idx_len) {
93 state->pollfd_idx = talloc_realloc(
94 state, state->pollfd_idx, int, idx_len);
95 if (state->pollfd_idx == NULL) {
96 DEBUG(10, ("talloc_realloc failed\n"));
97 return false;
101 fds = *pfds;
102 num_pollfds = *pnum_pfds;
104 if (talloc_array_length(fds) < num_pollfds + num_fds) {
105 fds = talloc_realloc(mem_ctx, fds, struct pollfd,
106 num_pollfds + num_fds);
107 if (fds == NULL) {
108 DEBUG(10, ("talloc_realloc failed\n"));
109 return false;
113 memset(&fds[num_pollfds], 0, sizeof(struct pollfd) * num_fds);
116 * This needs tuning. We need to cope with multiple fde's for a file
117 * descriptor. The problem is that we need to re-use pollfd_idx across
118 * calls for efficiency. One way would be a direct bitmask that might
119 * be initialized quicker, but our bitmap_init implementation is
120 * pretty heavy-weight as well.
122 for (i=0; i<idx_len; i++) {
123 state->pollfd_idx[i] = -1;
126 for (fde = ev->fd_events; fde; fde = fde->next) {
127 struct pollfd *pfd;
129 if ((fde->flags & (TEVENT_FD_READ|TEVENT_FD_WRITE)) == 0) {
130 continue;
133 if (state->pollfd_idx[fde->fd] == -1) {
135 * We haven't seen this fd yet. Allocate a new pollfd.
137 state->pollfd_idx[fde->fd] = num_pollfds;
138 pfd = &fds[num_pollfds];
139 num_pollfds += 1;
140 } else {
142 * We have already seen this fd. OR in the flags.
144 pfd = &fds[state->pollfd_idx[fde->fd]];
147 pfd->fd = fde->fd;
149 if (fde->flags & TEVENT_FD_READ) {
150 pfd->events |= (POLLIN|POLLHUP);
152 if (fde->flags & TEVENT_FD_WRITE) {
153 pfd->events |= POLLOUT;
156 *pfds = fds;
157 *pnum_pfds = num_pollfds;
159 if (ev->immediate_events != NULL) {
160 *ptimeout = 0;
161 return true;
163 if (ev->timer_events == NULL) {
164 *ptimeout = MIN(*ptimeout, INT_MAX);
165 return true;
168 now = timeval_current();
169 diff = timeval_until(&now, &ev->timer_events->next_event);
170 timeout = timeval_to_msec(diff);
172 if (timeout < *ptimeout) {
173 *ptimeout = timeout;
176 return true;
179 bool run_events_poll(struct tevent_context *ev, int pollrtn,
180 struct pollfd *pfds, int num_pfds)
182 struct tevent_poll_private *state;
183 int *pollfd_idx;
184 struct tevent_fd *fde;
186 if (ev->signal_events &&
187 tevent_common_check_signal(ev)) {
188 return true;
191 if (ev->threaded_contexts != NULL) {
192 tevent_common_threaded_activate_immediate(ev);
195 if (ev->immediate_events &&
196 tevent_common_loop_immediate(ev)) {
197 return true;
200 if (pollrtn <= 0) {
201 struct timeval tval;
203 tval = tevent_common_loop_timer_delay(ev);
204 if (tevent_timeval_is_zero(&tval)) {
205 return true;
209 * No fd ready
211 return false;
214 state = (struct tevent_poll_private *)ev->additional_data;
215 pollfd_idx = state->pollfd_idx;
217 for (fde = ev->fd_events; fde; fde = fde->next) {
218 struct pollfd *pfd;
219 uint16_t flags = 0;
221 if ((fde->flags & (TEVENT_FD_READ|TEVENT_FD_WRITE)) == 0) {
222 continue;
225 if (pollfd_idx[fde->fd] >= num_pfds) {
226 DEBUG(1, ("internal error: pollfd_idx[fde->fd] (%d) "
227 ">= num_pfds (%d)\n", pollfd_idx[fde->fd],
228 num_pfds));
229 return false;
231 pfd = &pfds[pollfd_idx[fde->fd]];
233 if (pfd->fd != fde->fd) {
234 DEBUG(1, ("internal error: pfd->fd (%d) "
235 "!= fde->fd (%d)\n", pollfd_idx[fde->fd],
236 num_pfds));
237 return false;
240 if (pfd->revents & (POLLHUP|POLLERR)) {
241 /* If we only wait for TEVENT_FD_WRITE, we
242 should not tell the event handler about it,
243 and remove the writable flag, as we only
244 report errors when waiting for read events
245 to match the select behavior. */
246 if (!(fde->flags & TEVENT_FD_READ)) {
247 TEVENT_FD_NOT_WRITEABLE(fde);
248 continue;
250 flags |= TEVENT_FD_READ;
253 if (pfd->revents & POLLIN) {
254 flags |= TEVENT_FD_READ;
256 if (pfd->revents & POLLOUT) {
257 flags |= TEVENT_FD_WRITE;
259 if (flags & fde->flags) {
260 DLIST_DEMOTE(ev->fd_events, fde);
261 fde->handler(ev, fde, flags, fde->private_data);
262 return true;
266 return false;
269 struct timeval *get_timed_events_timeout(struct tevent_context *ev,
270 struct timeval *to_ret)
272 struct timeval now;
274 if ((ev->timer_events == NULL) && (ev->immediate_events == NULL)) {
275 return NULL;
277 if (ev->immediate_events != NULL) {
278 *to_ret = timeval_zero();
279 return to_ret;
282 now = timeval_current();
283 *to_ret = timeval_until(&now, &ev->timer_events->next_event);
285 DEBUG(10, ("timed_events_timeout: %d/%d\n", (int)to_ret->tv_sec,
286 (int)to_ret->tv_usec));
288 return to_ret;
291 static int s3_event_loop_once(struct tevent_context *ev, const char *location)
293 struct tevent_poll_private *state;
294 int timeout;
295 int num_pfds;
296 int ret;
297 int poll_errno;
299 timeout = INT_MAX;
301 state = tevent_get_poll_private(ev);
302 if (state == NULL) {
303 errno = ENOMEM;
304 return -1;
307 if (run_events_poll(ev, 0, NULL, 0)) {
308 return 0;
311 num_pfds = 0;
312 if (!event_add_to_poll_args(ev, state,
313 &state->pfds, &num_pfds, &timeout)) {
314 return -1;
317 tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_WAIT);
318 ret = poll(state->pfds, num_pfds, timeout);
319 poll_errno = errno;
320 tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_WAIT);
321 errno = poll_errno;
323 if (ret == -1 && errno != EINTR) {
324 tevent_debug(ev, TEVENT_DEBUG_FATAL,
325 "poll() failed: %d:%s\n",
326 errno, strerror(errno));
327 return -1;
330 run_events_poll(ev, ret, state->pfds, num_pfds);
331 return 0;
/*
 * Backend init hook for the "s3" tevent backend. No per-context setup
 * is needed here; the poll state is created lazily by
 * tevent_get_poll_private().
 */
static int s3_event_context_init(struct tevent_context *ev)
{
	(void)ev;	/* nothing to initialize */
	return 0;
}
339 void dump_event_list(struct tevent_context *ev)
341 struct tevent_timer *te;
342 struct tevent_fd *fe;
343 struct timeval evt, now;
345 if (!ev) {
346 return;
349 now = timeval_current();
351 DEBUG(10,("dump_event_list:\n"));
353 for (te = ev->timer_events; te; te = te->next) {
355 evt = timeval_until(&now, &te->next_event);
357 DEBUGADD(10,("Timed Event \"%s\" %p handled in %d seconds (at %s)\n",
358 te->handler_name,
360 (int)evt.tv_sec,
361 http_timestring(talloc_tos(), te->next_event.tv_sec)));
364 for (fe = ev->fd_events; fe; fe = fe->next) {
366 DEBUGADD(10,("FD Event %d %p, flags: 0x%04x\n",
367 fe->fd,
369 fe->flags));
373 static const struct tevent_ops s3_event_ops = {
374 .context_init = s3_event_context_init,
375 .add_fd = tevent_common_add_fd,
376 .set_fd_close_fn = tevent_common_fd_set_close_fn,
377 .get_fd_flags = tevent_common_fd_get_flags,
378 .set_fd_flags = tevent_common_fd_set_flags,
379 .add_timer = tevent_common_add_timer,
380 .schedule_immediate = tevent_common_schedule_immediate,
381 .add_signal = tevent_common_add_signal,
382 .loop_once = s3_event_loop_once,
383 .loop_wait = tevent_common_loop_wait,
386 static bool s3_tevent_init(void)
388 static bool initialized;
389 if (initialized) {
390 return true;
392 initialized = tevent_register_backend("s3", &s3_event_ops);
393 tevent_set_default_backend("s3");
394 return initialized;
397 struct tevent_context *s3_tevent_context_init(TALLOC_CTX *mem_ctx)
399 struct tevent_context *ev;
401 s3_tevent_init();
403 ev = tevent_context_init_byname(mem_ctx, "s3");
404 if (ev) {
405 samba_tevent_set_debug(ev, "s3_tevent");
408 return ev;
/*
 * A repeating timed event: handler fires every 'interval' until it
 * returns false, at which point the event destroys itself.
 */
struct idle_event {
	struct tevent_timer *te;	/* the currently scheduled timer */
	struct timeval interval;	/* gap between handler invocations */
	char *name;			/* talloc'ed label, used in debug output */
	bool (*handler)(const struct timeval *now, void *private_data);
	void *private_data;		/* opaque argument passed to handler */
};
419 static void smbd_idle_event_handler(struct tevent_context *ctx,
420 struct tevent_timer *te,
421 struct timeval now,
422 void *private_data)
424 struct idle_event *event =
425 talloc_get_type_abort(private_data, struct idle_event);
427 TALLOC_FREE(event->te);
429 DEBUG(10,("smbd_idle_event_handler: %s %p called\n",
430 event->name, event->te));
432 if (!event->handler(&now, event->private_data)) {
433 DEBUG(10,("smbd_idle_event_handler: %s %p stopped\n",
434 event->name, event->te));
435 /* Don't repeat, delete ourselves */
436 TALLOC_FREE(event);
437 return;
440 DEBUG(10,("smbd_idle_event_handler: %s %p rescheduled\n",
441 event->name, event->te));
443 event->te = tevent_add_timer(ctx, event,
444 timeval_sum(&now, &event->interval),
445 smbd_idle_event_handler, event);
447 /* We can't do much but fail here. */
448 SMB_ASSERT(event->te != NULL);
451 struct idle_event *event_add_idle(struct tevent_context *event_ctx,
452 TALLOC_CTX *mem_ctx,
453 struct timeval interval,
454 const char *name,
455 bool (*handler)(const struct timeval *now,
456 void *private_data),
457 void *private_data)
459 struct idle_event *result;
460 struct timeval now = timeval_current();
462 result = talloc(mem_ctx, struct idle_event);
463 if (result == NULL) {
464 DEBUG(0, ("talloc failed\n"));
465 return NULL;
468 result->interval = interval;
469 result->handler = handler;
470 result->private_data = private_data;
472 if (!(result->name = talloc_asprintf(result, "idle_evt(%s)", name))) {
473 DEBUG(0, ("talloc failed\n"));
474 TALLOC_FREE(result);
475 return NULL;
478 result->te = tevent_add_timer(event_ctx, result,
479 timeval_sum(&now, &interval),
480 smbd_idle_event_handler, result);
481 if (result->te == NULL) {
482 DEBUG(0, ("event_add_timed failed\n"));
483 TALLOC_FREE(result);
484 return NULL;
487 DEBUG(10,("event_add_idle: %s %p\n", result->name, result->te));
488 return result;