/*
 * Dedicated thread for virtio-blk I/O processing
 *
 * Copyright 2012 IBM, Corp.
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *   Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/main-loop.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk.h"
#include "virtio-blk.h"
#include "block/aio.h"
#include "hw/virtio/virtio-bus.h"
#include "qom/object_interfaces.h"
29 struct VirtIOBlockDataPlane
{
35 QEMUBH
*bh
; /* bh for guest notification */
36 unsigned long *batch_notify_vqs
;
37 bool batch_notifications
;
39 /* Note that these EventNotifiers are assigned by value. This is
40 * fine as long as you do not call event_notifier_cleanup on them
41 * (because you don't own the file descriptor or handle; you just
48 /* Raise an interrupt to signal guest, if necessary */
49 void virtio_blk_data_plane_notify(VirtIOBlockDataPlane
*s
, VirtQueue
*vq
)
51 if (s
->batch_notifications
) {
52 set_bit(virtio_get_queue_index(vq
), s
->batch_notify_vqs
);
53 qemu_bh_schedule(s
->bh
);
55 virtio_notify_irqfd(s
->vdev
, vq
);
59 static void notify_guest_bh(void *opaque
)
61 VirtIOBlockDataPlane
*s
= opaque
;
62 unsigned nvqs
= s
->conf
->num_queues
;
63 unsigned long bitmap
[BITS_TO_LONGS(nvqs
)];
66 memcpy(bitmap
, s
->batch_notify_vqs
, sizeof(bitmap
));
67 memset(s
->batch_notify_vqs
, 0, sizeof(bitmap
));
69 for (j
= 0; j
< nvqs
; j
+= BITS_PER_LONG
) {
70 unsigned long bits
= bitmap
[j
/ BITS_PER_LONG
];
73 unsigned i
= j
+ ctzl(bits
);
74 VirtQueue
*vq
= virtio_get_queue(s
->vdev
, i
);
76 virtio_notify_irqfd(s
->vdev
, vq
);
78 bits
&= bits
- 1; /* clear right-most bit */
83 /* Context: QEMU global mutex held */
84 bool virtio_blk_data_plane_create(VirtIODevice
*vdev
, VirtIOBlkConf
*conf
,
85 VirtIOBlockDataPlane
**dataplane
,
88 VirtIOBlockDataPlane
*s
;
89 BusState
*qbus
= BUS(qdev_get_parent_bus(DEVICE(vdev
)));
90 VirtioBusClass
*k
= VIRTIO_BUS_GET_CLASS(qbus
);
95 if (!k
->set_guest_notifiers
|| !k
->ioeventfd_assign
) {
97 "device is incompatible with iothread "
98 "(transport does not support notifiers)");
101 if (!virtio_device_ioeventfd_enabled(vdev
)) {
102 error_setg(errp
, "ioeventfd is required for iothread");
106 /* If dataplane is (re-)enabled while the guest is running there could
107 * be block jobs that can conflict.
109 if (blk_op_is_blocked(conf
->conf
.blk
, BLOCK_OP_TYPE_DATAPLANE
, errp
)) {
110 error_prepend(errp
, "cannot start virtio-blk dataplane: ");
114 /* Don't try if transport does not support notifiers. */
115 if (!virtio_device_ioeventfd_enabled(vdev
)) {
119 s
= g_new0(VirtIOBlockDataPlane
, 1);
123 if (conf
->iothread
) {
124 s
->iothread
= conf
->iothread
;
125 object_ref(OBJECT(s
->iothread
));
126 s
->ctx
= iothread_get_aio_context(s
->iothread
);
128 s
->ctx
= qemu_get_aio_context();
130 s
->bh
= aio_bh_new(s
->ctx
, notify_guest_bh
, s
);
131 s
->batch_notify_vqs
= bitmap_new(conf
->num_queues
);
138 /* Context: QEMU global mutex held */
139 void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane
*s
)
147 vblk
= VIRTIO_BLK(s
->vdev
);
148 assert(!vblk
->dataplane_started
);
149 g_free(s
->batch_notify_vqs
);
150 qemu_bh_delete(s
->bh
);
152 object_unref(OBJECT(s
->iothread
));
157 static bool virtio_blk_data_plane_handle_output(VirtIODevice
*vdev
,
160 VirtIOBlock
*s
= (VirtIOBlock
*)vdev
;
162 assert(s
->dataplane
);
163 assert(s
->dataplane_started
);
165 return virtio_blk_handle_vq(s
, vq
);
168 /* Context: QEMU global mutex held */
169 int virtio_blk_data_plane_start(VirtIODevice
*vdev
)
171 VirtIOBlock
*vblk
= VIRTIO_BLK(vdev
);
172 VirtIOBlockDataPlane
*s
= vblk
->dataplane
;
173 BusState
*qbus
= BUS(qdev_get_parent_bus(DEVICE(vblk
)));
174 VirtioBusClass
*k
= VIRTIO_BUS_GET_CLASS(qbus
);
176 unsigned nvqs
= s
->conf
->num_queues
;
177 Error
*local_err
= NULL
;
180 if (vblk
->dataplane_started
|| s
->starting
) {
186 if (!virtio_vdev_has_feature(vdev
, VIRTIO_RING_F_EVENT_IDX
)) {
187 s
->batch_notifications
= true;
189 s
->batch_notifications
= false;
192 /* Set up guest notifier (irq) */
193 r
= k
->set_guest_notifiers(qbus
->parent
, nvqs
, true);
195 error_report("virtio-blk failed to set guest notifier (%d), "
196 "ensure -accel kvm is set.", r
);
197 goto fail_guest_notifiers
;
200 /* Set up virtqueue notify */
201 for (i
= 0; i
< nvqs
; i
++) {
202 r
= virtio_bus_set_host_notifier(VIRTIO_BUS(qbus
), i
, true);
204 fprintf(stderr
, "virtio-blk failed to set host notifier (%d)\n", r
);
206 virtio_bus_set_host_notifier(VIRTIO_BUS(qbus
), i
, false);
207 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus
), i
);
209 goto fail_guest_notifiers
;
214 vblk
->dataplane_started
= true;
215 trace_virtio_blk_data_plane_start(s
);
217 r
= blk_set_aio_context(s
->conf
->conf
.blk
, s
->ctx
, &local_err
);
219 error_report_err(local_err
);
220 goto fail_guest_notifiers
;
223 /* Process queued requests before the ones in vring */
224 virtio_blk_process_queued_requests(vblk
, false);
226 /* Kick right away to begin processing requests already in vring */
227 for (i
= 0; i
< nvqs
; i
++) {
228 VirtQueue
*vq
= virtio_get_queue(s
->vdev
, i
);
230 event_notifier_set(virtio_queue_get_host_notifier(vq
));
233 /* Get this show started by hooking up our callbacks */
234 aio_context_acquire(s
->ctx
);
235 for (i
= 0; i
< nvqs
; i
++) {
236 VirtQueue
*vq
= virtio_get_queue(s
->vdev
, i
);
238 virtio_queue_aio_set_host_notifier_handler(vq
, s
->ctx
,
239 virtio_blk_data_plane_handle_output
);
241 aio_context_release(s
->ctx
);
244 fail_guest_notifiers
:
246 * If we failed to set up the guest notifiers queued requests will be
247 * processed on the main context.
249 virtio_blk_process_queued_requests(vblk
, false);
250 vblk
->dataplane_disabled
= true;
252 vblk
->dataplane_started
= true;
256 /* Stop notifications for new requests from guest.
258 * Context: BH in IOThread
260 static void virtio_blk_data_plane_stop_bh(void *opaque
)
262 VirtIOBlockDataPlane
*s
= opaque
;
265 for (i
= 0; i
< s
->conf
->num_queues
; i
++) {
266 VirtQueue
*vq
= virtio_get_queue(s
->vdev
, i
);
268 virtio_queue_aio_set_host_notifier_handler(vq
, s
->ctx
, NULL
);
272 /* Context: QEMU global mutex held */
273 void virtio_blk_data_plane_stop(VirtIODevice
*vdev
)
275 VirtIOBlock
*vblk
= VIRTIO_BLK(vdev
);
276 VirtIOBlockDataPlane
*s
= vblk
->dataplane
;
277 BusState
*qbus
= qdev_get_parent_bus(DEVICE(vblk
));
278 VirtioBusClass
*k
= VIRTIO_BUS_GET_CLASS(qbus
);
280 unsigned nvqs
= s
->conf
->num_queues
;
282 if (!vblk
->dataplane_started
|| s
->stopping
) {
286 /* Better luck next time. */
287 if (vblk
->dataplane_disabled
) {
288 vblk
->dataplane_disabled
= false;
289 vblk
->dataplane_started
= false;
293 trace_virtio_blk_data_plane_stop(s
);
295 aio_context_acquire(s
->ctx
);
296 aio_wait_bh_oneshot(s
->ctx
, virtio_blk_data_plane_stop_bh
, s
);
298 /* Drain and try to switch bs back to the QEMU main loop. If other users
299 * keep the BlockBackend in the iothread, that's ok */
300 blk_set_aio_context(s
->conf
->conf
.blk
, qemu_get_aio_context(), NULL
);
302 aio_context_release(s
->ctx
);
304 for (i
= 0; i
< nvqs
; i
++) {
305 virtio_bus_set_host_notifier(VIRTIO_BUS(qbus
), i
, false);
306 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus
), i
);
309 qemu_bh_cancel(s
->bh
);
310 notify_guest_bh(s
); /* final chance to notify guest */
312 /* Clean up guest notifier (irq) */
313 k
->set_guest_notifiers(qbus
->parent
, nvqs
, false);
315 vblk
->dataplane_started
= false;