lib/tevent/tevent_epoll.c
/*
   Unix SMB/CIFS implementation.

   main select loop and event handling - epoll implementation

   Copyright (C) Andrew Tridgell        2003-2005
   Copyright (C) Stefan Metzmacher      2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_internal.h"
#include "tevent_util.h"
struct epoll_event_context {
        /* a pointer back to the generic event_context */
        struct tevent_context *ev;

        /* when using epoll this is the handle from epoll_create */
        int epoll_fd;

        /* our pid at the time the epoll_fd was created */
        pid_t pid;
};
/*
  called when an epoll call fails, and we should fallback
  to using select
*/
static void epoll_panic(struct epoll_event_context *epoll_ev, const char *reason)
{
        tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
                     "%s (%s) - calling abort()\n", reason, strerror(errno));
        abort();
}
/*
  map from TEVENT_FD_* to EPOLLIN/EPOLLOUT
*/
static uint32_t epoll_map_flags(uint16_t flags)
{
        uint32_t ret = 0;
        if (flags & TEVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP);
        if (flags & TEVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP);
        return ret;
}
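/*
  Note: the kernel reports EPOLLERR and EPOLLHUP even when they are not
  requested, so OR-ing them in above mainly documents the intent; how such
  events are delivered to handlers is decided further down in this file.
*/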
/*
  free the epoll fd
*/
static int epoll_ctx_destructor(struct epoll_event_context *epoll_ev)
{
        close(epoll_ev->epoll_fd);
        epoll_ev->epoll_fd = -1;
        return 0;
}
/*
  init the epoll fd
*/
static int epoll_init_ctx(struct epoll_event_context *epoll_ev)
{
        epoll_ev->epoll_fd = epoll_create(64);
        epoll_ev->pid = getpid();
        talloc_set_destructor(epoll_ev, epoll_ctx_destructor);
        if (epoll_ev->epoll_fd == -1) {
                return -1;
        }
        return 0;
}
static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde);
/*
  reopen the epoll handle when our pid changes
  see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for a
  demonstration of why this is needed
*/
static void epoll_check_reopen(struct epoll_event_context *epoll_ev)
{
        struct tevent_fd *fde;

        if (epoll_ev->pid == getpid()) {
                return;
        }

        close(epoll_ev->epoll_fd);
        epoll_ev->epoll_fd = epoll_create(64);
        if (epoll_ev->epoll_fd == -1) {
                tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
                             "Failed to recreate epoll handle after fork\n");
                return;
        }
        epoll_ev->pid = getpid();
        for (fde=epoll_ev->ev->fd_events;fde;fde=fde->next) {
                epoll_add_event(epoll_ev, fde);
        }
}
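/*
  Background for the check above: an epoll instance is shared across fork(),
  so parent and child would otherwise steal each other's events. Re-creating
  the epoll fd and re-adding every tracked fde gives the current process its
  own, independent event set.
*/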
#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT	(1<<0)
#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR	(1<<1)
#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR	(1<<2)
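/*
  Meaning of the additional_flags bits, as used in this file:
    HAS_EVENT    - an epoll_event is currently registered for this fde
    REPORT_ERROR - EPOLLERR/EPOLLHUP should be passed to the handler
                   (only set while the caller is interested in TEVENT_FD_READ)
    GOT_ERROR    - an error condition has already been seen on this fd
*/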
/*
  add the epoll event to the given fd_event
*/
static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
        struct epoll_event event;

        if (epoll_ev->epoll_fd == -1) return;

        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

        /* if we don't want events yet, don't add an epoll_event */
        if (fde->flags == 0) return;

        ZERO_STRUCT(event);
        event.events = epoll_map_flags(fde->flags);
        event.data.ptr = fde;
        if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event) != 0) {
                epoll_panic(epoll_ev, "EPOLL_CTL_ADD failed");
        }
        fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;

        /* only if we want to read do we want to tell the event handler about errors */
        if (fde->flags & TEVENT_FD_READ) {
                fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
        }
}
/*
  delete the epoll event for the given fd_event
*/
static void epoll_del_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
        struct epoll_event event;

        if (epoll_ev->epoll_fd == -1) return;

        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

        /* if there's no epoll_event, we don't need to delete it */
        if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) return;

        ZERO_STRUCT(event);
        event.events = epoll_map_flags(fde->flags);
        event.data.ptr = fde;
        if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event) != 0) {
                tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
                             "epoll_del_event failed! probable early close bug (%s)\n",
                             strerror(errno));
        }
        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
}
/*
  change the epoll event for the given fd_event
*/
static void epoll_mod_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
        struct epoll_event event;

        if (epoll_ev->epoll_fd == -1) return;

        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

        ZERO_STRUCT(event);
        event.events = epoll_map_flags(fde->flags);
        event.data.ptr = fde;
        if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event) != 0) {
                epoll_panic(epoll_ev, "EPOLL_CTL_MOD failed");
        }

        /* only if we want to read do we want to tell the event handler about errors */
        if (fde->flags & TEVENT_FD_READ) {
                fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
        }
}
static void epoll_change_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
{
        bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
        bool want_read = (fde->flags & TEVENT_FD_READ);
        bool want_write = (fde->flags & TEVENT_FD_WRITE);

        if (epoll_ev->epoll_fd == -1) return;

        fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;

        /* there's already an event */
        if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
                if (want_read || (want_write && !got_error)) {
                        epoll_mod_event(epoll_ev, fde);
                        return;
                }
                /*
                 * if we want to match the select behavior, we need to remove the epoll_event
                 * when the caller isn't interested in events.
                 *
                 * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
                 */
                epoll_del_event(epoll_ev, fde);
                return;
        }

        /* there's no epoll_event attached to the fde */
        if (want_read || (want_write && !got_error)) {
                epoll_add_event(epoll_ev, fde);
                return;
        }
}
/*
  event loop handling using epoll
*/
static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
{
        int ret, i;
#define MAXEVENTS 1
        struct epoll_event events[MAXEVENTS];
        int timeout = -1;

        if (epoll_ev->epoll_fd == -1) return -1;

        if (tvalp) {
                /* it's better to trigger timed events a bit later than too early */
                timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
        }

        if (epoll_ev->ev->signal_events &&
            tevent_common_check_signal(epoll_ev->ev)) {
                return 0;
        }

        ret = epoll_wait(epoll_ev->epoll_fd, events, MAXEVENTS, timeout);

        if (ret == -1 && errno == EINTR && epoll_ev->ev->signal_events) {
                if (tevent_common_check_signal(epoll_ev->ev)) {
                        return 0;
                }
        }

        if (ret == -1 && errno != EINTR) {
                epoll_panic(epoll_ev, "epoll_wait() failed");
                return -1;
        }

        if (ret == 0 && tvalp) {
                /* we don't care about a possible delay here */
                tevent_common_loop_timer_delay(epoll_ev->ev);
                return 0;
        }

        for (i=0;i<ret;i++) {
                struct tevent_fd *fde = talloc_get_type(events[i].data.ptr,
                                                        struct tevent_fd);
                uint16_t flags = 0;

                if (fde == NULL) {
                        epoll_panic(epoll_ev, "epoll_wait() gave bad data");
                        return -1;
                }
                if (events[i].events & (EPOLLHUP|EPOLLERR)) {
                        fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
                        /*
                         * if we only wait for TEVENT_FD_WRITE, we should not tell the
                         * event handler about it, and remove the epoll_event,
                         * as we only report errors when waiting for read events,
                         * to match the select() behavior
                         */
                        if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
                                epoll_del_event(epoll_ev, fde);
                                continue;
                        }
                        flags |= TEVENT_FD_READ;
                }
                if (events[i].events & EPOLLIN) flags |= TEVENT_FD_READ;
                if (events[i].events & EPOLLOUT) flags |= TEVENT_FD_WRITE;
                if (flags) {
                        fde->handler(epoll_ev->ev, fde, flags, fde->private_data);
                        break;
                }
        }

        return 0;
}
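/*
  With MAXEVENTS set to 1 above, epoll_wait() returns at most one event per
  call, and the break after invoking the handler preserves that
  one-event-per-iteration behavior even if MAXEVENTS were raised; presumably
  this is a safety measure, since the handler may free or re-register other
  fdes behind our back.
*/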
/*
  create an epoll_event_context structure.
*/
static int epoll_event_context_init(struct tevent_context *ev)
{
        int ret;
        struct epoll_event_context *epoll_ev;

        epoll_ev = talloc_zero(ev, struct epoll_event_context);
        if (!epoll_ev) return -1;
        epoll_ev->ev = ev;
        epoll_ev->epoll_fd = -1;

        ret = epoll_init_ctx(epoll_ev);
        if (ret != 0) {
                talloc_free(epoll_ev);
                return ret;
        }

        ev->additional_data = epoll_ev;
        return 0;
}
/*
  destroy an fd_event
*/
static int epoll_event_fd_destructor(struct tevent_fd *fde)
{
        struct tevent_context *ev = fde->event_ctx;
        struct epoll_event_context *epoll_ev = NULL;

        if (ev) {
                epoll_ev = talloc_get_type(ev->additional_data,
                                           struct epoll_event_context);

                epoll_check_reopen(epoll_ev);

                epoll_del_event(epoll_ev, fde);
        }

        return tevent_common_fd_destructor(fde);
}
/*
  add an fd based event
  return NULL on failure (memory allocation error)
*/
static struct tevent_fd *epoll_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
                                            int fd, uint16_t flags,
                                            tevent_fd_handler_t handler,
                                            void *private_data,
                                            const char *handler_name,
                                            const char *location)
{
        struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
                                                               struct epoll_event_context);
        struct tevent_fd *fde;

        epoll_check_reopen(epoll_ev);

        fde = tevent_common_add_fd(ev, mem_ctx, fd, flags,
                                   handler, private_data,
                                   handler_name, location);
        if (!fde) return NULL;

        talloc_set_destructor(fde, epoll_event_fd_destructor);

        epoll_add_event(epoll_ev, fde);

        return fde;
}
/*
  set the fd event flags
*/
static void epoll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
{
        struct tevent_context *ev;
        struct epoll_event_context *epoll_ev;

        if (fde->flags == flags) return;

        ev = fde->event_ctx;
        epoll_ev = talloc_get_type(ev->additional_data, struct epoll_event_context);

        fde->flags = flags;

        epoll_check_reopen(epoll_ev);

        epoll_change_event(epoll_ev, fde);
}
/*
  do a single event loop using the events defined in ev
*/
static int epoll_event_loop_once(struct tevent_context *ev, const char *location)
{
        struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
                                                               struct epoll_event_context);
        struct timeval tval;

        if (ev->signal_events &&
            tevent_common_check_signal(ev)) {
                return 0;
        }

        if (ev->immediate_events &&
            tevent_common_loop_immediate(ev)) {
                return 0;
        }

        tval = tevent_common_loop_timer_delay(ev);
        if (tevent_timeval_is_zero(&tval)) {
                return 0;
        }

        epoll_check_reopen(epoll_ev);

        return epoll_event_loop(epoll_ev, &tval);
}
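/*
  Note on ordering in epoll_event_loop_once() above: pending signal events
  are checked first, then immediate events, then expired timers; only when
  none of those fired do we block in epoll_wait() via epoll_event_loop().
*/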
static const struct tevent_ops epoll_event_ops = {
        .context_init           = epoll_event_context_init,
        .add_fd                 = epoll_event_add_fd,
        .set_fd_close_fn        = tevent_common_fd_set_close_fn,
        .get_fd_flags           = tevent_common_fd_get_flags,
        .set_fd_flags           = epoll_event_set_fd_flags,
        .add_timer              = tevent_common_add_timer,
        .schedule_immediate     = tevent_common_schedule_immediate,
        .add_signal             = tevent_common_add_signal,
        .loop_once              = epoll_event_loop_once,
        .loop_wait              = tevent_common_loop_wait,
};
_PRIVATE_ bool tevent_epoll_init(void)
{
        return tevent_register_backend("epoll", &epoll_event_ops);
}
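/*
  Minimal usage sketch (not part of this file): once the backend above is
  registered, application code can request it by name through the public
  tevent API. This assumes the standard tevent/talloc public headers:

        struct tevent_context *ev = tevent_context_init_byname(NULL, "epoll");
        if (ev == NULL) {
                ev = tevent_context_init(NULL);  // fall back to the default backend
        }
        ...
        talloc_free(ev);
*/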