/*
 * Dedicated thread for virtio-blk I/O processing
 *
 * Copyright 2012 IBM, Corp.
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *   Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk.h"
#include "virtio-blk.h"
#include "block/aio.h"
#include "hw/virtio/virtio-bus.h"
#include "qom/object_interfaces.h"

struct VirtIOBlockDataPlane {
    bool starting;
    bool stopping;

    VirtIOBlkConf *conf;
    VirtIODevice *vdev;
    QEMUBH *bh;                     /* bh for guest notification */
    unsigned long *batch_notify_vqs;
    bool batch_notifications;

    /* Note that these EventNotifiers are assigned by value. This is
     * fine as long as you do not call event_notifier_cleanup on them
     * (because you don't own the file descriptor or handle; you just
     * use it).
     */
    IOThread *iothread;
    AioContext *ctx;
};

/* Raise an interrupt to signal guest, if necessary */
void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
{
    if (s->batch_notifications) {
        set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
        qemu_bh_schedule(s->bh);
    } else {
        virtio_notify_irqfd(s->vdev, vq);
    }
}
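
/* BH scheduled by virtio_blk_data_plane_notify(): signal every virtqueue
 * whose bit is set in batch_notify_vqs, so several completed requests can
 * share one guest notification per queue.
 */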
static void notify_guest_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned nvqs = s->conf->num_queues;
    unsigned long bitmap[BITS_TO_LONGS(nvqs)];
    unsigned j;

    memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap));
    memset(s->batch_notify_vqs, 0, sizeof(bitmap));

    for (j = 0; j < nvqs; j += BITS_PER_LONG) {
        unsigned long bits = bitmap[j / BITS_PER_LONG];

        while (bits != 0) {
            unsigned i = j + ctzl(bits);
            VirtQueue *vq = virtio_get_queue(s->vdev, i);

            virtio_notify_irqfd(s->vdev, vq);

            bits &= bits - 1; /* clear right-most bit */
        }
    }
}
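
/* Allocate dataplane state for @vdev if the transport and configuration
 * support it.  On success *dataplane is set and true is returned; otherwise
 * *dataplane stays NULL and, where appropriate, an error is stored in @errp.
 */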
/* Context: QEMU global mutex held */
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                  VirtIOBlockDataPlane **dataplane,
                                  Error **errp)
{
    VirtIOBlockDataPlane *s;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    *dataplane = NULL;

    if (conf->iothread) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "
                       "(transport does not support notifiers)");
            return false;
        }
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            error_setg(errp, "ioeventfd is required for iothread");
            return false;
        }

        /* If dataplane is (re-)enabled while the guest is running there could
         * be block jobs that can conflict.
         */
        if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            error_prepend(errp, "cannot start virtio-blk dataplane: ");
            return false;
        }
    }
    /* Don't try if transport does not support notifiers. */
    if (!virtio_device_ioeventfd_enabled(vdev)) {
        return false;
    }

    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->conf = conf;

    if (conf->iothread) {
        s->iothread = conf->iothread;
        object_ref(OBJECT(s->iothread));
        s->ctx = iothread_get_aio_context(s->iothread);
    } else {
        s->ctx = qemu_get_aio_context();
    }
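    /* Without a dedicated iothread the "dataplane" runs in the main loop's
     * AioContext, so the notification BH below shares that context with
     * request processing.
     */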
    s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
    s->batch_notify_vqs = bitmap_new(conf->num_queues);

    *dataplane = s;

    return true;
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
    VirtIOBlock *vblk;

    if (!s) {
        return;
    }

    vblk = VIRTIO_BLK(s->vdev);
    assert(!vblk->dataplane_started);
    g_free(s->batch_notify_vqs);
    qemu_bh_delete(s->bh);
    if (s->iothread) {
        object_unref(OBJECT(s->iothread));
    }
    g_free(s);
}
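
/* Virtqueue handler invoked in the dataplane AioContext when the host
 * notifier (ioeventfd) fires; returns true if any progress was made.
 */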
static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
                                                VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    assert(s->dataplane);
    assert(s->dataplane_started);

    return virtio_blk_handle_vq(s, vq);
}
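
/* Switch to dataplane operation: wire up guest and host notifiers, move the
 * BlockBackend to the dataplane AioContext and attach the virtqueue handlers
 * there.
 */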
/* Context: QEMU global mutex held */
int virtio_blk_data_plane_start(VirtIODevice *vdev)
{
    VirtIOBlock *vblk = VIRTIO_BLK(vdev);
    VirtIOBlockDataPlane *s = vblk->dataplane;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf->num_queues;
    Error *local_err = NULL;
    int r;

    if (vblk->dataplane_started || s->starting) {
        return 0;
    }

    s->starting = true;
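
    /* Batch guest notifications through the BH only when the guest does not
     * negotiate VIRTIO_RING_F_EVENT_IDX to suppress notifications itself.
     */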
    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        s->batch_notifications = true;
    } else {
        s->batch_notifications = false;
    }

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, nvqs, true);
    if (r != 0) {
        error_report("virtio-blk failed to set guest notifier (%d), "
                     "ensure -accel kvm is set.", r);
        goto fail_guest_notifiers;
    }

    /* Set up virtqueue notify */
    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
        if (r != 0) {
            fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
            while (i--) {
                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
                virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
            }
            goto fail_guest_notifiers;
        }
    }

    s->starting = false;
    vblk->dataplane_started = true;
    trace_virtio_blk_data_plane_start(s);

    r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail_guest_notifiers;
    }

    /* Kick right away to begin processing requests already in vring */
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        event_notifier_set(virtio_queue_get_host_notifier(vq));
    }

    /* Get this show started by hooking up our callbacks */
    aio_context_acquire(s->ctx);
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx,
                virtio_blk_data_plane_handle_output);
    }
    aio_context_release(s->ctx);
    return 0;
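
    /* Fall back to emulation without dataplane: leave dataplane_started set
     * with dataplane_disabled so requests keep being handled from the main
     * loop and virtio_blk_data_plane_stop() only has to clear the flags.
     */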
  fail_guest_notifiers:
    vblk->dataplane_disabled = true;
    s->starting = false;
    vblk->dataplane_started = true;
    return -ENOSYS;
}

/* Stop notifications for new requests from guest.
 *
 * Context: BH in IOThread
 */
static void virtio_blk_data_plane_stop_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned i;

    for (i = 0; i < s->conf->num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
    }
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_stop(VirtIODevice *vdev)
{
    VirtIOBlock *vblk = VIRTIO_BLK(vdev);
    VirtIOBlockDataPlane *s = vblk->dataplane;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vblk));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf->num_queues;

    if (!vblk->dataplane_started || s->stopping) {
        return;
    }

    /* Better luck next time. */
    if (vblk->dataplane_disabled) {
        vblk->dataplane_disabled = false;
        vblk->dataplane_started = false;
        return;
    }
    s->stopping = true;
    trace_virtio_blk_data_plane_stop(s);

    aio_context_acquire(s->ctx);
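    /* Detach the virtqueue handlers from within the IOThread so none of them
     * can still be running when the BlockBackend is switched back below.
     */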
    aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);

    /* Drain and try to switch bs back to the QEMU main loop. If other users
     * keep the BlockBackend in the iothread, that's ok */
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);

    aio_context_release(s->ctx);
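
    /* Tear down the per-queue host notifiers now that their handlers are
     * detached.
     */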
    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    vblk->dataplane_started = false;
    s->stopping = false;
}