/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */
11 #include <sys/syscall.h>
19 struct aio_thread_req
{
22 unsigned long long offset
;
25 struct aio_context
*aio
;
28 static int aio_req_fd_r
= -1;
29 static int aio_req_fd_w
= -1;
31 #if defined(HAVE_AIO_ABI)
32 #include <linux/aio_abi.h>
/* If we have the headers, we are going to build with AIO enabled.
 * If we don't have aio in libc, we define the necessary stubs here.
 */
38 #if !defined(HAVE_AIO_LIBC)
/* Raw syscall stub for io_setup(2), used when libc lacks AIO support.
 * Returns 0 on success, -1 with errno set on failure.
 */
static long io_setup(int n, aio_context_t *ctxp)
{
	return syscall(__NR_io_setup, n, ctxp);
}
/* Raw syscall stub for io_submit(2).
 * Returns the number of iocbs submitted on success, -1 with errno set
 * on failure.
 */
static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	return syscall(__NR_io_submit, ctx, nr, iocbpp);
}
/* Raw syscall stub for io_getevents(2).
 * Returns the number of events reaped on success, -1 with errno set on
 * failure.
 */
static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
			 struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
}
/* The AIO_MMAP cases force the mmapped page into memory here
 * rather than in whatever place first touches the data.  I used
 * to do this by touching the page, but that's delicate because
 * gcc is prone to optimizing that away.  So, what's done here
 * is we read from the descriptor from which the page was
 * mapped.  The caller is required to pass an offset which is
 * inside the page that was mapped.  Thus, when the read
 * returns, we know that the page is in the page cache, and
 * that it now backs the mmapped area.
 */
69 static int do_aio(aio_context_t ctx
, enum aio_type type
, int fd
, char *buf
,
70 int len
, unsigned long long offset
, struct aio_context
*aio
)
72 struct iocb iocb
, *iocbp
= &iocb
;
76 iocb
= ((struct iocb
) { .aio_data
= (unsigned long) aio
,
79 .aio_buf
= (unsigned long) buf
,
84 .aio_reserved3
= 0 });
88 iocb
.aio_lio_opcode
= IOCB_CMD_PREAD
;
89 err
= io_submit(ctx
, 1, &iocbp
);
92 iocb
.aio_lio_opcode
= IOCB_CMD_PWRITE
;
93 err
= io_submit(ctx
, 1, &iocbp
);
96 iocb
.aio_lio_opcode
= IOCB_CMD_PREAD
;
97 iocb
.aio_buf
= (unsigned long) &c
;
98 iocb
.aio_nbytes
= sizeof(c
);
99 err
= io_submit(ctx
, 1, &iocbp
);
102 printk("Bogus op in do_aio - %d\n", type
);
115 static aio_context_t ctx
= 0;
117 static int aio_thread(void *arg
)
119 struct aio_thread_reply reply
;
120 struct io_event event
;
121 int err
, n
, reply_fd
;
123 signal(SIGWINCH
, SIG_IGN
);
126 n
= io_getevents(ctx
, 1, 1, &event
, NULL
);
130 printk("aio_thread - io_getevents failed, "
131 "errno = %d\n", errno
);
134 reply
= ((struct aio_thread_reply
)
135 { .data
= (void *) (long) event
.data
,
137 reply_fd
= ((struct aio_context
*) reply
.data
)->reply_fd
;
138 err
= os_write_file(reply_fd
, &reply
, sizeof(reply
));
139 if(err
!= sizeof(reply
))
140 printk("aio_thread - write failed, fd = %d, "
141 "err = %d\n", aio_req_fd_r
, -err
);
149 static int do_not_aio(struct aio_thread_req
*req
)
156 err
= os_seek_file(req
->io_fd
, req
->offset
);
160 err
= os_read_file(req
->io_fd
, req
->buf
, req
->len
);
163 err
= os_seek_file(req
->io_fd
, req
->offset
);
167 err
= os_write_file(req
->io_fd
, req
->buf
, req
->len
);
170 err
= os_seek_file(req
->io_fd
, req
->offset
);
174 err
= os_read_file(req
->io_fd
, &c
, sizeof(c
));
177 printk("do_not_aio - bad request type : %d\n", req
->type
);
186 static int not_aio_thread(void *arg
)
188 struct aio_thread_req req
;
189 struct aio_thread_reply reply
;
192 signal(SIGWINCH
, SIG_IGN
);
194 err
= os_read_file(aio_req_fd_r
, &req
, sizeof(req
));
195 if(err
!= sizeof(req
)){
197 printk("not_aio_thread - read failed, "
198 "fd = %d, err = %d\n", aio_req_fd_r
,
201 printk("not_aio_thread - short read, fd = %d, "
202 "length = %d\n", aio_req_fd_r
, err
);
206 err
= do_not_aio(&req
);
207 reply
= ((struct aio_thread_reply
) { .data
= req
.aio
,
209 err
= os_write_file(req
.aio
->reply_fd
, &reply
, sizeof(reply
));
210 if(err
!= sizeof(reply
))
211 printk("not_aio_thread - write failed, fd = %d, "
212 "err = %d\n", aio_req_fd_r
, -err
);
216 static int aio_pid
= -1;
218 static int init_aio_24(void)
223 err
= os_pipe(fds
, 1, 1);
227 aio_req_fd_w
= fds
[0];
228 aio_req_fd_r
= fds
[1];
229 err
= run_helper_thread(not_aio_thread
, NULL
,
230 CLONE_FILES
| CLONE_VM
| SIGCHLD
, &stack
, 0);
238 os_close_file(fds
[0]);
239 os_close_file(fds
[1]);
244 printk("/usr/include/linux/aio_abi.h not present during build\n");
246 printk("2.6 host AIO support not used - falling back to I/O "
252 #define DEFAULT_24_AIO 0
253 static int init_aio_26(void)
258 if(io_setup(256, &ctx
)){
260 printk("aio_thread failed to initialize context, err = %d\n",
265 err
= run_helper_thread(aio_thread
, NULL
,
266 CLONE_FILES
| CLONE_VM
| SIGCHLD
, &stack
, 0);
272 printk("Using 2.6 host AIO\n");
276 static int submit_aio_26(enum aio_type type
, int io_fd
, char *buf
, int len
,
277 unsigned long long offset
, struct aio_context
*aio
)
279 struct aio_thread_reply reply
;
282 err
= do_aio(ctx
, type
, io_fd
, buf
, len
, offset
, aio
);
284 reply
= ((struct aio_thread_reply
) { .data
= aio
,
286 err
= os_write_file(aio
->reply_fd
, &reply
, sizeof(reply
));
287 if(err
!= sizeof(reply
))
288 printk("submit_aio_26 - write failed, "
289 "fd = %d, err = %d\n", aio
->reply_fd
, -err
);
297 #define DEFAULT_24_AIO 1
/* Stub used when linux/aio_abi.h was unavailable at build time:
 * 2.6 AIO can never be initialized.
 */
static int init_aio_26(void)
{
	return -ENOSYS;
}
303 static int submit_aio_26(enum aio_type type
, int io_fd
, char *buf
, int len
,
304 unsigned long long offset
, struct aio_context
*aio
)
310 static int aio_24
= DEFAULT_24_AIO
;
312 static int __init
set_aio_24(char *name
, int *add
)
318 __uml_setup("aio=2.4", set_aio_24
,
320 " This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n"
321 " available. 2.4 AIO is a single thread that handles one request at a\n"
322 " time, synchronously. 2.6 AIO is a thread which uses the 2.6 AIO \n"
323 " interface to handle an arbitrary number of pending requests. 2.6 AIO \n"
324 " is not available in tt mode, on 2.4 hosts, or when UML is built with\n"
325 " /usr/include/linux/aio_abi.h not available. Many distributions don't\n"
326 " include aio_abi.h, so you will need to copy it from a kernel tree to\n"
327 " your /usr/include/linux in order to build an AIO-capable UML\n\n"
330 static int init_aio(void)
336 printk("Disabling 2.6 AIO in tt mode\n");
342 if(err
&& (errno
== ENOSYS
)){
343 printk("2.6 AIO not supported on the host - "
344 "reverting to 2.4 AIO\n");
351 return init_aio_24();
/* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
 * needs to be called when the kernel is running because it calls run_helper,
 * which needs get_free_page.  exit_aio is a __uml_exitcall because the
 * generic kernel does not run __exitcalls on shutdown, and can't because
 * many of them break when called outside of module unloading.
 */
362 __initcall(init_aio
);
364 static void exit_aio(void)
367 os_kill_process(aio_pid
, 1);
370 __uml_exitcall(exit_aio
);
372 static int submit_aio_24(enum aio_type type
, int io_fd
, char *buf
, int len
,
373 unsigned long long offset
, struct aio_context
*aio
)
375 struct aio_thread_req req
= { .type
= type
,
384 err
= os_write_file(aio_req_fd_w
, &req
, sizeof(req
));
385 if(err
== sizeof(req
))
391 int submit_aio(enum aio_type type
, int io_fd
, char *buf
, int len
,
392 unsigned long long offset
, int reply_fd
,
393 struct aio_context
*aio
)
395 aio
->reply_fd
= reply_fd
;
397 return submit_aio_24(type
, io_fd
, buf
, len
, offset
, aio
);
399 return submit_aio_26(type
, io_fd
, buf
, len
, offset
, aio
);