/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * epoll(7) file descriptor monitoring
 */

#include "qemu/osdep.h"
#include <sys/epoll.h>
#include "qemu/rcu_queue.h"
#include "aio-posix.h"

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64
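/*
 * Close the epoll file descriptor, if one is open, and switch this
 * AioContext back to the poll-based fdmon_poll_ops implementation.
 */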
void fdmon_epoll_disable(AioContext *ctx)
{
    if (ctx->epollfd >= 0) {
        close(ctx->epollfd);
        ctx->epollfd = -1;
    }

    /* Switch back to the poll-based implementation */
    ctx->fdmon_ops = &fdmon_poll_ops;
}
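/*
 * Translate GLib GPollFD event bits (G_IO_*) into the corresponding epoll
 * event bits (EPOLL*).
 */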
static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}
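/*
 * Keep the epoll interest list in sync with an AioHandler change: a NULL
 * new_node removes the fd, a NULL old_node adds it, and two non-NULL nodes
 * modify the registered events. If epoll_ctl() fails we fall back to poll().
 */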
static void fdmon_epoll_update(AioContext *ctx,
                               AioHandler *old_node,
                               AioHandler *new_node)
{
    struct epoll_event event = {
        .data.ptr = new_node,
        .events = new_node ? epoll_events_from_pfd(new_node->pfd.events) : 0,
    };
    int r;

    if (!new_node) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, old_node->pfd.fd, &event);
    } else if (!old_node) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, new_node->pfd.fd, &event);
    } else {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, new_node->pfd.fd, &event);
    }

    if (r) {
        fdmon_epoll_disable(ctx);
    }
}
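/*
 * Wait for file descriptor events and append ready handlers to ready_list.
 * For positive timeouts the epoll fd itself is first waited on with
 * qemu_poll_ns(), which supports nanosecond granularity; epoll_wait() is then
 * called without blocking to collect the ready fds.
 */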
static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list,
                            int64_t timeout)
{
    GPollFD pfd = {
        .fd = ctx->epollfd,
        .events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR,
    };
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    /* Fall back while external clients are disabled */
    if (atomic_read(&ctx->external_disable_cnt)) {
        return fdmon_poll_ops.wait(ctx, ready_list, timeout);
    }

    if (timeout > 0) {
        ret = qemu_poll_ns(&pfd, 1, timeout);
        if (ret > 0) {
            timeout = 0;
        }
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         ARRAY_SIZE(events),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                          (ev & EPOLLOUT ? G_IO_OUT : 0) |
                          (ev & EPOLLHUP ? G_IO_HUP : 0) |
                          (ev & EPOLLERR ? G_IO_ERR : 0);

            node = events[i].data.ptr;
            aio_add_ready_handler(ready_list, node, revents);
        }
    }
out:
    return ret;
}
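/* Callbacks used once epoll has been enabled for an AioContext */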
static const FDMonOps fdmon_epoll_ops = {
    .update = fdmon_epoll_update,
    .wait = fdmon_epoll_wait,
    .need_wait = aio_poll_disabled,
};
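/*
 * Register every existing AioHandler that is armed and not pending deletion
 * with the epoll instance. Returns false if any epoll_ctl() call fails;
 * on success, installs fdmon_epoll_ops for this AioContext.
 */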
static bool fdmon_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;

        if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }

    ctx->fdmon_ops = &fdmon_epoll_ops;
    return true;
}
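/*
 * Switch from poll() to epoll once the number of monitored fds reaches
 * EPOLL_ENABLE_THRESHOLD. Returns true if epoll is now in use.
 */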
bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
{
    if (ctx->epollfd < 0) {
        return false;
    }

    /* Do not upgrade while external clients are disabled */
    if (atomic_read(&ctx->external_disable_cnt)) {
        return false;
    }

    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (fdmon_epoll_try_enable(ctx)) {
            return true;
        } else {
            fdmon_epoll_disable(ctx);
        }
    }

    return false;
}
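/*
 * Create the epoll instance at AioContext setup time. Failure is not fatal:
 * ctx->epollfd stays -1, so fdmon_epoll_try_upgrade() never switches away
 * from the poll-based implementation.
 */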
void fdmon_epoll_setup(AioContext *ctx)
{
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
    }
}