util/fdmon-epoll.c
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * epoll(7) file descriptor monitoring
 */

#include "qemu/osdep.h"
#include <sys/epoll.h>
#include "qemu/rcu_queue.h"
#include "aio-posix.h"

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64
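
/*
 * Stop using epoll(7): close the epoll fd and revert this AioContext to the
 * poll-based fdmon_poll_ops implementation.
 */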
void fdmon_epoll_disable(AioContext *ctx)
{
    if (ctx->epollfd >= 0) {
        close(ctx->epollfd);
        ctx->epollfd = -1;
    }

    /* Switch back */
    ctx->fdmon_ops = &fdmon_poll_ops;
}

static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}
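
/*
 * Add, modify, or remove an fd in the epoll set: a NULL old_node means
 * EPOLL_CTL_ADD, a NULL new_node means EPOLL_CTL_DEL, otherwise the existing
 * registration is modified.  If epoll_ctl() fails, fall back to poll.
 */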
static void fdmon_epoll_update(AioContext *ctx,
                               AioHandler *old_node,
                               AioHandler *new_node)
{
    struct epoll_event event = {
        .data.ptr = new_node,
        .events = new_node ? epoll_events_from_pfd(new_node->pfd.events) : 0,
    };
    int r;

    if (!new_node) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, old_node->pfd.fd, &event);
    } else if (!old_node) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, new_node->pfd.fd, &event);
    } else {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, new_node->pfd.fd, &event);
    }

    if (r) {
        fdmon_epoll_disable(ctx);
    }
}

static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list,
                            int64_t timeout)
{
    GPollFD pfd = {
        .fd = ctx->epollfd,
        .events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR,
    };
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];
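
    /*
     * epoll_wait() only has millisecond timeout resolution, so block with
     * nanosecond precision via qemu_poll_ns() on the epoll fd first; if it
     * reports activity, reap the ready events with a non-blocking
     * epoll_wait() below.
     */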
    if (timeout > 0) {
        ret = qemu_poll_ns(&pfd, 1, timeout);
        if (ret > 0) {
            timeout = 0;
        }
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         ARRAY_SIZE(events),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
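        /* Translate epoll(7) event bits back into the GLib poll flags used by AioHandler */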
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                          (ev & EPOLLOUT ? G_IO_OUT : 0) |
                          (ev & EPOLLHUP ? G_IO_HUP : 0) |
                          (ev & EPOLLERR ? G_IO_ERR : 0);

            node = events[i].data.ptr;
            aio_add_ready_handler(ready_list, node, revents);
        }
    }
out:
    return ret;
}

static const FDMonOps fdmon_epoll_ops = {
    .update = fdmon_epoll_update,
    .wait = fdmon_epoll_wait,
    .need_wait = aio_poll_disabled,
};
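
/*
 * Register every existing AioHandler with the epoll instance and switch the
 * AioContext over to fdmon_epoll_ops.  Returns false if any epoll_ctl() call
 * fails; the caller is then expected to clean up with fdmon_epoll_disable().
 */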
static bool fdmon_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }

    ctx->fdmon_ops = &fdmon_epoll_ops;
    return true;
}
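
/*
 * Upgrade from poll to epoll once the number of monitored fds reaches
 * EPOLL_ENABLE_THRESHOLD.  The handler list is locked so it cannot change
 * while fds are being added to the epoll set.
 */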
bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
{
    bool ok;

    if (ctx->epollfd < 0) {
        return false;
    }

    if (npfd < EPOLL_ENABLE_THRESHOLD) {
        return false;
    }

    /* The list must not change while we add fds to epoll */
    if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
        return false;
    }

    ok = fdmon_epoll_try_enable(ctx);

    qemu_lockcnt_inc_and_unlock(&ctx->list_lock);

    if (!ok) {
        fdmon_epoll_disable(ctx);
    }
    return ok;
}
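
/*
 * Create the epoll instance for this AioContext.  On failure ctx->epollfd
 * stays -1 and fdmon_epoll_try_upgrade() will never switch away from poll.
 */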
void fdmon_epoll_setup(AioContext *ctx)
{
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
    }
}