block/linux-aio.c
/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"

#include <libaio.h>
/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest. If we get more outstanding requests at a time
 * than this, we will get EAGAIN from io_submit, which is communicated to
 * the guest as an I/O error.
 */
#define MAX_EVENTS 128
struct qemu_laiocb {
    BlockAIOCB common;
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};
typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;
struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for batched submission */
    LaioQueue io_q;

    /* I/O completion processing */
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);
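
/*
 * Combine a completion event's res2/res fields into one ssize_t result.
 * For plain reads and writes the kernel reports the outcome (bytes
 * transferred, or a negative errno) in res and leaves res2 zero, so in
 * practice this simply widens res.
 */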
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}
/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;
    if (laiocb->co) {
        /* Jump in and finish completion for foreign requests; do nothing
         * for the current request, it will be completed shortly. */
        if (laiocb->co != qemu_coroutine_self()) {
            qemu_coroutine_enter(laiocb->co);
        }
    } else {
        laiocb->common.cb(laiocb->common.opaque, ret);
        qemu_aio_unref(laiocb);
    }
}
/*
 * The aio_ring buffer, which is shared between userspace and the kernel.
 *
 * This is copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has been around for ages, so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned    id;     /* kernel internal index number */
    unsigned    nr;     /* number of io_events */
    unsigned    head;   /* Written to by userland or by kernel. */
    unsigned    tail;

    unsigned    magic;
    unsigned    compat_features;
    unsigned    incompat_features;
    unsigned    header_length;  /* size of aio_ring */

    struct io_event io_events[0];
};
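
/*
 * The helpers below cast an io_context_t straight to struct aio_ring *.
 * This relies on io_setup() handing userspace the address of the mmap'ed
 * ring as the context handle, which is assumed kernel behaviour rather
 * than documented ABI.
 */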
/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 *
 * Returns the number of completed events and sets a pointer
 * to the events array. This function does not update the internal
 * ring buffer; it only reads head and tail. When @events has been
 * processed, io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of s->events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}
/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}
/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer and returns the number of
 * elements left.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}
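
/*
 * Typical pairing of the helpers above (an illustrative sketch, not code
 * from this file; handle_event() is a hypothetical consumer):
 *
 *     unsigned int i, nr = io_getevents_peek(ctx, &events);
 *     for (i = 0; i < nr; i++) {
 *         handle_event(&events[i]);
 *     }
 *     io_getevents_commit(ctx, nr);
 */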
/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops,
 * for example when a request callback invokes aio_poll(). In order to
 * support this, the completion indices are kept in LinuxAioState. The
 * function schedules BH completion so it can be called again in a nested
 * event loop. When there are no events left to complete, the BH is
 * canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested, we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop. If we are the last level, all counters have
     * dropped to zero. */
    s->event_max = 0;
    s->event_idx = 0;
}
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    qemu_laio_process_completions(s);
    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}
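
/*
 * Both entry points above funnel into the same routine: the event
 * notifier fires when the kernel signals the eventfd attached to each
 * iocb, while the BH re-runs completion processing inside nested event
 * loops.
 */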
static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled, cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}
static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel_async       = laio_cancel,
};
static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We can try to complete something right away if there are
         * still requests in flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still hold pending requests (in_queue > 0). We do not attempt
         * to repeat submission to avoid an I/O hang. The reason is simple:
         * s->e is still set, the completion callback will be called shortly,
         * and all pending requests will be submitted from there.
         */
    }
}
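
/*
 * Back-pressure note: when io_submit() returns EAGAIN, the loop above
 * leaves the remaining requests queued and sets io_q.blocked; they are
 * resubmitted from the completion path once in-flight requests retire.
 */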
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}
void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
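
/*
 * Illustrative plug/unplug usage (a sketch; the fd, buffers and callback
 * are hypothetical): bracketing submissions batches them into fewer
 * io_submit() calls.
 *
 *     laio_io_plug(bs, s);
 *     laio_submit(bs, s, fd, sector0, &qiov0, nb0, cb, opaque, QEMU_AIO_READ);
 *     laio_submit(bs, s, fd, sector1, &qiov1, nb1, cb, opaque, QEMU_AIO_READ);
 *     laio_io_unplug(bs, s);    // flushes the pending queue
 */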
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
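    /* Submit right away when the queue is unplugged (and not blocked on a
     * previous EAGAIN); while plugged, flush early only once MAX_EVENTS
     * requests have accumulated. */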
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}
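
/*
 * Illustrative caller (a sketch with hypothetical names; must run in
 * coroutine context with the LinuxAioState attached to the current
 * AioContext):
 *
 *     static int coroutine_fn my_co_preadv(BlockDriverState *bs, int fd,
 *                                          uint64_t offset,
 *                                          QEMUIOVector *qiov)
 *     {
 *         return laio_co_submit(bs, my_laio_state, fd, offset, qiov,
 *                               QEMU_AIO_READ);
 *     }
 */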
BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
                        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laiocb *laiocb;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    ret = laio_do_submit(fd, laiocb, offset, type);
    if (ret < 0) {
        qemu_aio_unref(laiocb);
        return NULL;
    }

    return &laiocb->common;
}
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL);
    qemu_bh_delete(s->completion_bh);
}
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb);
}
LinuxAioState *laio_init(void)
{
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}
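
/*
 * Illustrative setup/teardown sequence (a sketch; error handling and the
 * surrounding context are hypothetical):
 *
 *     LinuxAioState *aio = laio_init();
 *     if (!aio) {
 *         ... fall back to another AIO implementation ...
 *     }
 *     laio_attach_aio_context(aio, ctx);
 *     ... submit I/O via laio_submit() or laio_co_submit() ...
 *     laio_detach_aio_context(aio, ctx);
 *     laio_cleanup(aio);
 */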
void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}