/*
 * Use the io_uring of Linux (>= 5.1)
 *
 * Copyright (C) Volker Lendecke 2008
 * Copyright (C) Jeremy Allison 2010
 * Copyright (C) Stefan Metzmacher 2019
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "replace.h"

/*
 * liburing.h only needs a forward declaration
 * of struct open_how.
 *
 * If struct open_how is defined in liburing/compat.h
 * itself, hide it away in order to avoid conflicts
 * with including linux/openat2.h or defining 'struct open_how'
 * in libreplace.
 */
struct open_how;
#ifdef HAVE_STRUCT_OPEN_HOW_LIBURING_COMPAT_H
#define open_how __ignore_liburing_compat_h_open_how
#include <liburing/compat.h>
#undef open_how
#endif /* HAVE_STRUCT_OPEN_HOW_LIBURING_COMPAT_H */
#include "includes.h"
#include "system/filesys.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/sys_rw.h"
#include "lib/util/iov_buf.h"
#include "smbprofile.h"
#include <liburing.h>
struct vfs_io_uring_request;
struct vfs_io_uring_config {
	struct io_uring uring;
	struct tevent_fd *fde;
	/* recursion guard. See comment above vfs_io_uring_queue_run() */
	bool busy;
	/* recursion guard. See comment above vfs_io_uring_queue_run() */
	bool need_retry;
	struct vfs_io_uring_request *queue;
	struct vfs_io_uring_request *pending;
};
struct vfs_io_uring_request {
	struct vfs_io_uring_request *prev, *next;
	struct vfs_io_uring_request **list_head;
	struct vfs_io_uring_config *config;
	struct tevent_req *req;
	void (*completion_fn)(struct vfs_io_uring_request *cur,
			      const char *location);
	struct timespec start_time;
	struct timespec end_time;
	SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
	struct io_uring_sqe sqe;
	struct io_uring_cqe cqe;
};
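
/*
 * Request lifecycle (descriptive note, added for clarity): a request is
 * prepared into ->sqe, linked into config->queue by
 * vfs_io_uring_request_submit(), moved to config->pending once its SQE is
 * handed to the kernel in _vfs_io_uring_queue_run(), and finished via
 * ->completion_fn() from vfs_io_uring_finish_req() when the matching CQE
 * arrives.
 */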
static void vfs_io_uring_finish_req(struct vfs_io_uring_request *cur,
				    const struct io_uring_cqe *cqe,
				    struct timespec end_time,
				    const char *location)
{
	struct tevent_req *req =
		talloc_get_type_abort(cur->req,
		struct tevent_req);
	void *state = _tevent_req_data(req);

	talloc_set_destructor(state, NULL);
	if (cur->list_head != NULL) {
		DLIST_REMOVE((*cur->list_head), cur);
		cur->list_head = NULL;
	}
	cur->cqe = *cqe;

	SMBPROFILE_BYTES_ASYNC_SET_IDLE(cur->profile_bytes);
	cur->end_time = end_time;

	/*
	 * We rely on being inside the _send() function
	 * or tevent_req_defer_callback() being called
	 * already.
	 */
	cur->completion_fn(cur, location);
}
static void vfs_io_uring_config_destroy(struct vfs_io_uring_config *config,
					int ret,
					const char *location)
{
	struct vfs_io_uring_request *cur = NULL, *next = NULL;
	struct timespec start_time;
	struct timespec end_time;
	struct io_uring_cqe err_cqe = {
		.res = ret,
	};

	PROFILE_TIMESTAMP(&start_time);

	if (config->uring.ring_fd != -1) {
		/* TODO: cancel queued and pending requests */
		TALLOC_FREE(config->fde);
		io_uring_queue_exit(&config->uring);
		config->uring.ring_fd = -1;
	}

	PROFILE_TIMESTAMP(&end_time);

	for (cur = config->pending; cur != NULL; cur = next) {
		next = cur->next;
		err_cqe.user_data = (uintptr_t)(void *)cur;
		vfs_io_uring_finish_req(cur, &err_cqe, end_time, location);
	}

	for (cur = config->queue; cur != NULL; cur = next) {
		next = cur->next;
		err_cqe.user_data = (uintptr_t)(void *)cur;
		cur->start_time = start_time;
		vfs_io_uring_finish_req(cur, &err_cqe, end_time, location);
	}
}

static int vfs_io_uring_config_destructor(struct vfs_io_uring_config *config)
{
	vfs_io_uring_config_destroy(config, -EUCLEAN, __location__);
	return 0;
}
static int vfs_io_uring_request_state_deny_destructor(void *_state)
{
	struct __vfs_io_uring_generic_state {
		struct vfs_io_uring_request ur;
	} *state = (struct __vfs_io_uring_generic_state *)_state;
	struct vfs_io_uring_request *cur = &state->ur;

	/* our parent is gone */
	cur->req = NULL;

	/* remove ourself from any list */
	DLIST_REMOVE((*cur->list_head), cur);
	cur->list_head = NULL;

	/*
	 * Our state is about to go away,
	 * all we can do is shutting down the whole uring.
	 * But that's ok as we're most likely called from exit_server()
	 */
	vfs_io_uring_config_destroy(cur->config, -ESHUTDOWN, __location__);
	return 0;
}
static void vfs_io_uring_fd_handler(struct tevent_context *ev,
				    struct tevent_fd *fde,
				    uint16_t flags,
				    void *private_data);
static int vfs_io_uring_connect(vfs_handle_struct *handle, const char *service,
				const char *user)
{
	int ret;
	struct vfs_io_uring_config *config;
	unsigned num_entries;
	bool sqpoll;
	unsigned flags = 0;

	config = talloc_zero(handle->conn, struct vfs_io_uring_config);
	if (config == NULL) {
		DEBUG(0, ("talloc_zero() failed\n"));
		return -1;
	}

	SMB_VFS_HANDLE_SET_DATA(handle, config,
				NULL, struct vfs_io_uring_config,
				return -1);

	ret = SMB_VFS_NEXT_CONNECT(handle, service, user);
	if (ret < 0) {
		return ret;
	}

	num_entries = lp_parm_ulong(SNUM(handle->conn),
				    "io_uring",
				    "num_entries",
				    128);
	num_entries = MAX(num_entries, 1);

	sqpoll = lp_parm_bool(SNUM(handle->conn),
			      "io_uring",
			      "sqpoll",
			      false);
	if (sqpoll) {
		flags |= IORING_SETUP_SQPOLL;
	}

	ret = io_uring_queue_init(num_entries, &config->uring, flags);
	if (ret < 0) {
		SMB_VFS_NEXT_DISCONNECT(handle);
		DBG_ERR("io_uring_queue_init() failed - %s\n",
			strerror(-ret));
		return -1;
	}

	talloc_set_destructor(config, vfs_io_uring_config_destructor);

#ifdef HAVE_IO_URING_RING_DONTFORK
	ret = io_uring_ring_dontfork(&config->uring);
	if (ret < 0) {
		SMB_VFS_NEXT_DISCONNECT(handle);
		DBG_ERR("io_uring_ring_dontfork() failed - %s\n",
			strerror(-ret));
		return -1;
	}
#endif /* HAVE_IO_URING_RING_DONTFORK */

	config->fde = tevent_add_fd(handle->conn->sconn->ev_ctx,
				    config,
				    config->uring.ring_fd,
				    TEVENT_FD_READ,
				    vfs_io_uring_fd_handler,
				    handle);
	if (config->fde == NULL) {
		ret = errno;
		SMB_VFS_NEXT_DISCONNECT(handle);
		errno = ret;
		return -1;
	}

	return 0;
}
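
/*
 * Example smb.conf configuration (illustrative sketch; the parameter names
 * match the lp_parm_ulong()/lp_parm_bool() lookups above, the values shown
 * are assumed defaults):
 *
 *	[share]
 *		vfs objects = io_uring
 *		io_uring:num_entries = 128
 *		io_uring:sqpoll = no
 */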
static void _vfs_io_uring_queue_run(struct vfs_io_uring_config *config)
{
	struct vfs_io_uring_request *cur = NULL, *next = NULL;
	struct io_uring_cqe *cqe = NULL;
	unsigned cqhead;
	unsigned nr = 0;
	struct timespec start_time;
	struct timespec end_time;
	int ret;

	PROFILE_TIMESTAMP(&start_time);

	if (config->uring.ring_fd == -1) {
		vfs_io_uring_config_destroy(config, -ESTALE, __location__);
		return;
	}

	for (cur = config->queue; cur != NULL; cur = next) {
		struct io_uring_sqe *sqe = NULL;
		void *state = _tevent_req_data(cur->req);

		next = cur->next;

		sqe = io_uring_get_sqe(&config->uring);
		if (sqe == NULL) {
			break;
		}

		talloc_set_destructor(state,
			vfs_io_uring_request_state_deny_destructor);
		DLIST_REMOVE(config->queue, cur);
		*sqe = cur->sqe;
		DLIST_ADD_END(config->pending, cur);
		cur->list_head = &config->pending;
		SMBPROFILE_BYTES_ASYNC_SET_BUSY(cur->profile_bytes);

		cur->start_time = start_time;
	}

	ret = io_uring_submit(&config->uring);
	if (ret == -EAGAIN || ret == -EBUSY) {
		/* We just retry later */
	} else if (ret < 0) {
		vfs_io_uring_config_destroy(config, ret, __location__);
		return;
	}

	PROFILE_TIMESTAMP(&end_time);

	io_uring_for_each_cqe(&config->uring, cqhead, cqe) {
		cur = (struct vfs_io_uring_request *)io_uring_cqe_get_data(cqe);
		vfs_io_uring_finish_req(cur, cqe, end_time, __location__);
		nr++;
	}

	io_uring_cq_advance(&config->uring, nr);
}
/*
 * Wrapper function to prevent recursion which could happen
 * if we called _vfs_io_uring_queue_run() directly without
 * recursion checks.
 *
 * Looking at the pread call, we can have:
 *
 * vfs_io_uring_pread_send()
 *        ->vfs_io_uring_pread_submit()  <-----------------------------------
 *                ->vfs_io_uring_request_submit()                            |
 *                        ->vfs_io_uring_queue_run()                         |
 *                                ->_vfs_io_uring_queue_run()                |
 *                                                                           |
 * But inside _vfs_io_uring_queue_run() looks like:                          |
 *                                                                           |
 * _vfs_io_uring_queue_run() {                                               |
 *      if (THIS_IO_COMPLETED) {                                             |
 *              ->vfs_io_uring_finish_req()                                  |
 *                      ->cur->completion_fn()                               |
 *      }                                                                    |
 * }                                                                         |
 *                                                                           |
 * cur->completion_fn() for pread is set to vfs_io_uring_pread_completion()  |
 *                                                                           |
 * vfs_io_uring_pread_completion() {                                         |
 *      if (READ_TERMINATED) {                                               |
 *              -> tevent_req_done() - We're done, go back up the stack.     |
 *              return;                                                      |
 *      }                                                                    |
 *                                                                           |
 *      We have a short read - adjust the io vectors                         |
 *                                                                           |
 *      ->vfs_io_uring_pread_submit() ---------------------------------------
 * }
 *
 * So before calling _vfs_io_uring_queue_run() we bracket it with setting
 * a flag config->busy, and unset it once _vfs_io_uring_queue_run() finally
 * exits the retry loop.
 *
 * If we end up back into vfs_io_uring_queue_run() we notice we've done so
 * as config->busy is set and don't recurse into _vfs_io_uring_queue_run().
 *
 * We set the second flag config->need_retry that tells us to loop in the
 * vfs_io_uring_queue_run() call above us in the stack and return.
 *
 * When the outer call to _vfs_io_uring_queue_run() returns we are in
 * a loop checking if config->need_retry was set. That happens if
 * the short read case occurs and _vfs_io_uring_queue_run() ended up
 * recursing into vfs_io_uring_queue_run().
 *
 * Once vfs_io_uring_pread_completion() finishes without a short
 * read (the READ_TERMINATED case, tevent_req_done() is called)
 * then config->need_retry is left as false, we exit the loop,
 * set config->busy to false so the next top level call into
 * vfs_io_uring_queue_run() won't think it's a recursed call
 * and return.
 */
static void vfs_io_uring_queue_run(struct vfs_io_uring_config *config)
{
	if (config->busy) {
		/*
		 * We've recursed due to short read/write.
		 * Set need_retry to ensure we retry the
		 * io_uring_submit().
		 */
		config->need_retry = true;
		return;
	}

	/*
	 * Bracket the loop calling _vfs_io_uring_queue_run()
	 * with busy = true / busy = false,
	 * so we can detect recursion above.
	 */

	config->busy = true;

	do {
		config->need_retry = false;
		_vfs_io_uring_queue_run(config);
	} while (config->need_retry);

	config->busy = false;
}
static void vfs_io_uring_request_submit(struct vfs_io_uring_request *cur)
{
	struct vfs_io_uring_config *config = cur->config;

	io_uring_sqe_set_data(&cur->sqe, cur);
	DLIST_ADD_END(config->queue, cur);
	cur->list_head = &config->queue;

	vfs_io_uring_queue_run(config);
}
static void vfs_io_uring_fd_handler(struct tevent_context *ev,
				    struct tevent_fd *fde,
				    uint16_t flags,
				    void *private_data)
{
	vfs_handle_struct *handle = (vfs_handle_struct *)private_data;
	struct vfs_io_uring_config *config = NULL;

	SMB_VFS_HANDLE_GET_DATA(handle, config,
				struct vfs_io_uring_config,
				smb_panic(__location__));

	vfs_io_uring_queue_run(config);
}
struct vfs_io_uring_pread_state {
	struct files_struct *fsp;
	off_t offset;
	struct iovec iov;
	size_t nread;
	struct vfs_io_uring_request ur;
};

static void vfs_io_uring_pread_submit(struct vfs_io_uring_pread_state *state);
static void vfs_io_uring_pread_completion(struct vfs_io_uring_request *cur,
					  const char *location);
static struct tevent_req *vfs_io_uring_pread_send(struct vfs_handle_struct *handle,
						  TALLOC_CTX *mem_ctx,
						  struct tevent_context *ev,
						  struct files_struct *fsp,
						  void *data,
						  size_t n, off_t offset)
{
	struct tevent_req *req = NULL;
	struct vfs_io_uring_pread_state *state = NULL;
	struct vfs_io_uring_config *config = NULL;
	bool ok;

	SMB_VFS_HANDLE_GET_DATA(handle, config,
				struct vfs_io_uring_config,
				smb_panic(__location__));

	req = tevent_req_create(mem_ctx, &state,
				struct vfs_io_uring_pread_state);
	if (req == NULL) {
		return NULL;
	}
	state->ur.config = config;
	state->fsp = fsp;
	state->ur.completion_fn = vfs_io_uring_pread_completion;

	SMBPROFILE_BYTES_ASYNC_START(syscall_asys_pread, profile_p,
				     state->ur.profile_bytes, n);
	SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->ur.profile_bytes);

	ok = sys_valid_io_range(offset, n);
	if (!ok) {
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	state->offset = offset;
	state->iov.iov_base = (void *)data;
	state->iov.iov_len = n;
	vfs_io_uring_pread_submit(state);

	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	tevent_req_defer_callback(req, ev);
	return req;
}
static void vfs_io_uring_pread_submit(struct vfs_io_uring_pread_state *state)
{
	io_uring_prep_readv(&state->ur.sqe,
			    fsp_get_io_fd(state->fsp),
			    &state->iov, 1,
			    state->offset);
	vfs_io_uring_request_submit(&state->ur);
}
static void vfs_io_uring_pread_completion(struct vfs_io_uring_request *cur,
					  const char *location)
{
	struct vfs_io_uring_pread_state *state = tevent_req_data(
		cur->req, struct vfs_io_uring_pread_state);
	struct iovec *iov = &state->iov;
	int num_iov = 1;
	bool ok;

	/*
	 * We rely on being inside the _send() function
	 * or tevent_req_defer_callback() being called
	 * already.
	 */

	if (cur->cqe.res < 0) {
		int err = -cur->cqe.res;
		_tevent_req_error(cur->req, err, location);
		return;
	}

	if (cur->cqe.res == 0) {
		/*
		 * We reached EOF, we're done
		 */
		tevent_req_done(cur->req);
		return;
	}

	ok = iov_advance(&iov, &num_iov, cur->cqe.res);
	if (!ok) {
		/* This is not expected! */
		DBG_ERR("iov_advance() failed cur->cqe.res=%d > iov_len=%d\n",
			(int)cur->cqe.res,
			(int)state->iov.iov_len);
		tevent_req_error(cur->req, EIO);
		return;
	}

	/* sys_valid_io_range() already checked the boundaries */
	state->nread += state->ur.cqe.res;
	if (num_iov == 0) {
		tevent_req_done(cur->req);
		return;
	}

	/*
	 * sys_valid_io_range() already checked the boundaries
	 * now try to get the rest.
	 */
	state->offset += state->ur.cqe.res;
	vfs_io_uring_pread_submit(state);
}
static ssize_t vfs_io_uring_pread_recv(struct tevent_req *req,
				       struct vfs_aio_state *vfs_aio_state)
{
	struct vfs_io_uring_pread_state *state = tevent_req_data(
		req, struct vfs_io_uring_pread_state);
	ssize_t ret;

	SMBPROFILE_BYTES_ASYNC_END(state->ur.profile_bytes);
	vfs_aio_state->duration = nsec_time_diff(&state->ur.end_time,
						 &state->ur.start_time);

	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
		tevent_req_received(req);
		return -1;
	}

	vfs_aio_state->error = 0;
	ret = state->nread;

	tevent_req_received(req);
	return ret;
}
struct vfs_io_uring_pwrite_state {
	struct files_struct *fsp;
	off_t offset;
	struct iovec iov;
	size_t nwritten;
	struct vfs_io_uring_request ur;
};

static void vfs_io_uring_pwrite_submit(struct vfs_io_uring_pwrite_state *state);
static void vfs_io_uring_pwrite_completion(struct vfs_io_uring_request *cur,
					   const char *location);
static struct tevent_req *vfs_io_uring_pwrite_send(struct vfs_handle_struct *handle,
						   TALLOC_CTX *mem_ctx,
						   struct tevent_context *ev,
						   struct files_struct *fsp,
						   const void *data,
						   size_t n, off_t offset)
{
	struct tevent_req *req = NULL;
	struct vfs_io_uring_pwrite_state *state = NULL;
	struct vfs_io_uring_config *config = NULL;
	bool ok;

	SMB_VFS_HANDLE_GET_DATA(handle, config,
				struct vfs_io_uring_config,
				smb_panic(__location__));

	req = tevent_req_create(mem_ctx, &state,
				struct vfs_io_uring_pwrite_state);
	if (req == NULL) {
		return NULL;
	}
	state->ur.config = config;
	state->fsp = fsp;
	state->ur.completion_fn = vfs_io_uring_pwrite_completion;

	SMBPROFILE_BYTES_ASYNC_START(syscall_asys_pwrite, profile_p,
				     state->ur.profile_bytes, n);
	SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->ur.profile_bytes);

	ok = sys_valid_io_range(offset, n);
	if (!ok) {
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	state->offset = offset;
	state->iov.iov_base = discard_const(data);
	state->iov.iov_len = n;
	vfs_io_uring_pwrite_submit(state);

	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	tevent_req_defer_callback(req, ev);
	return req;
}
static void vfs_io_uring_pwrite_submit(struct vfs_io_uring_pwrite_state *state)
{
	io_uring_prep_writev(&state->ur.sqe,
			     fsp_get_io_fd(state->fsp),
			     &state->iov, 1,
			     state->offset);
	vfs_io_uring_request_submit(&state->ur);
}
static void vfs_io_uring_pwrite_completion(struct vfs_io_uring_request *cur,
					   const char *location)
{
	struct vfs_io_uring_pwrite_state *state = tevent_req_data(
		cur->req, struct vfs_io_uring_pwrite_state);
	struct iovec *iov = &state->iov;
	int num_iov = 1;
	bool ok;

	/*
	 * We rely on being inside the _send() function
	 * or tevent_req_defer_callback() being called
	 * already.
	 */

	if (cur->cqe.res < 0) {
		int err = -cur->cqe.res;
		_tevent_req_error(cur->req, err, location);
		return;
	}

	if (cur->cqe.res == 0) {
		/*
		 * Ensure we can never spin.
		 */
		tevent_req_error(cur->req, ENOSPC);
		return;
	}

	ok = iov_advance(&iov, &num_iov, cur->cqe.res);
	if (!ok) {
		/* This is not expected! */
		DBG_ERR("iov_advance() failed cur->cqe.res=%d > iov_len=%d\n",
			(int)cur->cqe.res,
			(int)state->iov.iov_len);
		tevent_req_error(cur->req, EIO);
		return;
	}

	/* sys_valid_io_range() already checked the boundaries */
	state->nwritten += state->ur.cqe.res;
	if (num_iov == 0) {
		tevent_req_done(cur->req);
		return;
	}

	/*
	 * sys_valid_io_range() already checked the boundaries
	 * now try to write the rest.
	 */
	state->offset += state->ur.cqe.res;
	vfs_io_uring_pwrite_submit(state);
}
static ssize_t vfs_io_uring_pwrite_recv(struct tevent_req *req,
					struct vfs_aio_state *vfs_aio_state)
{
	struct vfs_io_uring_pwrite_state *state = tevent_req_data(
		req, struct vfs_io_uring_pwrite_state);
	ssize_t ret;

	SMBPROFILE_BYTES_ASYNC_END(state->ur.profile_bytes);
	vfs_aio_state->duration = nsec_time_diff(&state->ur.end_time,
						 &state->ur.start_time);

	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
		tevent_req_received(req);
		return -1;
	}

	vfs_aio_state->error = 0;
	ret = state->nwritten;

	tevent_req_received(req);
	return ret;
}
struct vfs_io_uring_fsync_state {
	struct vfs_io_uring_request ur;
};

static void vfs_io_uring_fsync_completion(struct vfs_io_uring_request *cur,
					  const char *location);
static struct tevent_req *vfs_io_uring_fsync_send(struct vfs_handle_struct *handle,
						  TALLOC_CTX *mem_ctx,
						  struct tevent_context *ev,
						  struct files_struct *fsp)
{
	struct tevent_req *req = NULL;
	struct vfs_io_uring_fsync_state *state = NULL;
	struct vfs_io_uring_config *config = NULL;

	SMB_VFS_HANDLE_GET_DATA(handle, config,
				struct vfs_io_uring_config,
				smb_panic(__location__));

	req = tevent_req_create(mem_ctx, &state,
				struct vfs_io_uring_fsync_state);
	if (req == NULL) {
		return NULL;
	}
	state->ur.config = config;
	state->ur.completion_fn = vfs_io_uring_fsync_completion;

	SMBPROFILE_BYTES_ASYNC_START(syscall_asys_fsync, profile_p,
				     state->ur.profile_bytes, 0);
	SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->ur.profile_bytes);

	io_uring_prep_fsync(&state->ur.sqe,
			    fsp_get_io_fd(fsp),
			    0); /* fsync_flags */
	vfs_io_uring_request_submit(&state->ur);

	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	tevent_req_defer_callback(req, ev);
	return req;
}
static void vfs_io_uring_fsync_completion(struct vfs_io_uring_request *cur,
					  const char *location)
{
	/*
	 * We rely on being inside the _send() function
	 * or tevent_req_defer_callback() being called
	 * already.
	 */

	if (cur->cqe.res < 0) {
		int err = -cur->cqe.res;
		_tevent_req_error(cur->req, err, location);
		return;
	}

	if (cur->cqe.res > 0) {
		/* This is not expected! */
		DBG_ERR("got cur->cqe.res=%d\n", (int)cur->cqe.res);
		tevent_req_error(cur->req, EIO);
		return;
	}

	tevent_req_done(cur->req);
}
static int vfs_io_uring_fsync_recv(struct tevent_req *req,
				   struct vfs_aio_state *vfs_aio_state)
{
	struct vfs_io_uring_fsync_state *state = tevent_req_data(
		req, struct vfs_io_uring_fsync_state);

	SMBPROFILE_BYTES_ASYNC_END(state->ur.profile_bytes);
	vfs_aio_state->duration = nsec_time_diff(&state->ur.end_time,
						 &state->ur.start_time);

	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
		tevent_req_received(req);
		return -1;
	}

	vfs_aio_state->error = 0;

	tevent_req_received(req);
	return 0;
}
static struct vfs_fn_pointers vfs_io_uring_fns = {
	.connect_fn = vfs_io_uring_connect,
	.pread_send_fn = vfs_io_uring_pread_send,
	.pread_recv_fn = vfs_io_uring_pread_recv,
	.pwrite_send_fn = vfs_io_uring_pwrite_send,
	.pwrite_recv_fn = vfs_io_uring_pwrite_recv,
	.fsync_send_fn = vfs_io_uring_fsync_send,
	.fsync_recv_fn = vfs_io_uring_fsync_recv,
};
static_decl_vfs;
NTSTATUS vfs_io_uring_init(TALLOC_CTX *ctx)
{
	return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
				"io_uring", &vfs_io_uring_fns);
}