util/hbitmap: update orig_size on truncate
[qemu/ar7.git] / hw / virtio / virtio-mmio.c
blob97b7f35496ae15455f2f9265b5e5922e05d3f8eb
1 /*
2 * Virtio MMIO bindings
4 * Copyright (c) 2011 Linaro Limited
6 * Author:
7 * Peter Maydell <peter.maydell@linaro.org>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License; either version 2
11 * of the License, or (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
23 #include "standard-headers/linux/virtio_mmio.h"
24 #include "hw/sysbus.h"
25 #include "hw/virtio/virtio.h"
26 #include "qemu/host-utils.h"
27 #include "qemu/module.h"
28 #include "sysemu/kvm.h"
29 #include "hw/virtio/virtio-bus.h"
30 #include "qemu/error-report.h"
31 #include "qemu/log.h"
32 #include "trace.h"
34 /* QOM macros */
35 /* virtio-mmio-bus */
36 #define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
37 #define VIRTIO_MMIO_BUS(obj) \
38 OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
39 #define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
40 OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
41 #define VIRTIO_MMIO_BUS_CLASS(klass) \
42 OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)
44 /* virtio-mmio */
45 #define TYPE_VIRTIO_MMIO "virtio-mmio"
46 #define VIRTIO_MMIO(obj) \
47 OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)
49 #define VIRT_MAGIC 0x74726976 /* 'virt' */
50 #define VIRT_VERSION 1
51 #define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
typedef struct {
    /* Generic */
    SysBusDevice parent_obj;
    MemoryRegion iomem;         /* the 0x200-byte MMIO register window */
    qemu_irq irq;               /* interrupt line toggled on ISR changes */
    /* Guest accessible state needing migration and reset */
    uint32_t host_features_sel;  /* DEVICE_FEATURES_SEL; only bank 0 is valid */
    uint32_t guest_features_sel; /* DRIVER_FEATURES_SEL; only bank 0 is valid */
    uint32_t guest_page_shift;   /* log2 of the GUEST_PAGE_SIZE the guest wrote */
    /* virtio-bus */
    VirtioBusState bus;
    /* If true, include the MMIO base address in the device path */
    bool format_transport_address;
} VirtIOMMIOProxy;
67 static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
69 return kvm_eventfds_enabled();
72 static int virtio_mmio_ioeventfd_assign(DeviceState *d,
73 EventNotifier *notifier,
74 int n, bool assign)
76 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
78 if (assign) {
79 memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
80 true, n, notifier);
81 } else {
82 memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
83 true, n, notifier);
85 return 0;
/* Start host notifiers; called when the guest sets DRIVER_OK in STATUS. */
static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}
/* Stop host notifiers; called on reset and when DRIVER_OK is cleared. */
static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
/*
 * MMIO read dispatcher for the legacy (version 1) virtio-mmio register
 * window.  Offsets at or above VIRTIO_MMIO_CONFIG are the device config
 * space (byte/word/long accessible); everything below is a 4-byte-only
 * transport register.
 */
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            return VIRT_VERSION;
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            /* The memory core only passes sizes 1/2/4 for this region. */
            abort();
        }
    }

    /* Transport registers are strictly 32-bit wide. */
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }

    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        return VIRT_VERSION;
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        /* Legacy devices only have feature bank 0; other banks read 0. */
        if (proxy->host_features_sel) {
            return 0;
        }
        return vdev->host_features;
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        /* A size of 0 advertises the selected queue as unavailable. */
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        /* Reported in guest-page units, as configured via GUEST_PAGE_SIZE. */
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return atomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register\n",
                      __func__);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad register offset\n", __func__);
        return 0;
    }
    return 0;
}
/*
 * MMIO write dispatcher for the legacy virtio-mmio register window.
 * Mirrors virtio_mmio_read(): config space above VIRTIO_MMIO_CONFIG
 * accepts 1/2/4-byte accesses, transport registers are 4-byte only.
 */
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            /* The memory core only passes sizes 1/2/4 for this region. */
            abort();
        }
        return;
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        /* Legacy devices only have feature bank 0; writes to other
         * banks are silently ignored.
         */
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        /* Derive the shift from the page size; ctz32(0) is 32, so a
         * bogus zero write falls back to a shift of 0.
         */
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        /* Writing PFN 0 detaches the queue; the legacy spec makes that
         * a full device reset.
         */
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        /* Stop notifiers before the status change, start them after,
         * so ioeventfd state tracks DRIVER_OK transitions.
         */
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        /* Writing status 0 is a device reset request. */
        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to readonly register\n",
                      __func__);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad register offset\n", __func__);
    }
}
/* Dispatch table for the register window; legacy mode is native-endian. */
static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
319 static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
321 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
322 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
323 int level;
325 if (!vdev) {
326 return;
328 level = (atomic_read(&vdev->isr) != 0);
329 trace_virtio_mmio_setting_irq(level);
330 qemu_set_irq(proxy->irq, level);
/*
 * Restore transport state from the migration stream.  The field order
 * must exactly match virtio_mmio_save_config().
 */
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}
/*
 * Save transport state to the migration stream.  The field order is
 * the wire format consumed by virtio_mmio_load_config().
 */
static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}
/*
 * Device reset: stop host notifiers first so no kick can arrive while
 * the bus and the guest-visible selector state are being cleared.
 */
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;
}
363 static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
364 bool with_irqfd)
366 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
367 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
368 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
369 VirtQueue *vq = virtio_get_queue(vdev, n);
370 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
372 if (assign) {
373 int r = event_notifier_init(notifier, 0);
374 if (r < 0) {
375 return r;
377 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
378 } else {
379 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
380 event_notifier_cleanup(notifier);
383 if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
384 vdc->guest_notifier_mask(vdev, n, !assign);
387 return 0;
/*
 * Assign or deassign guest notifiers for the first @nvqs queues.
 * Iteration stops early at the first queue with size 0 (unused queue).
 * If assignment of queue n fails, queues 0..n-1 are rolled back and the
 * error is propagated.
 */
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}
423 /* virtio-mmio device */
/* User-settable properties of the virtio-mmio proxy device. */
static Property virtio_mmio_properties[] = {
    /* When true, the MMIO base address becomes part of the device path
     * (affects migration stream compatibility for older machine types).
     */
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Realize the proxy: create the virtio bus in place, export the IRQ
 * line, and map the 0x200-byte register window as a sysbus MMIO region.
 */
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
                          TYPE_VIRTIO_MMIO, 0x200);
    sysbus_init_mmio(sbd, &proxy->iomem);
}
444 static void virtio_mmio_class_init(ObjectClass *klass, void *data)
446 DeviceClass *dc = DEVICE_CLASS(klass);
448 dc->realize = virtio_mmio_realizefn;
449 dc->reset = virtio_mmio_reset;
450 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
451 dc->props = virtio_mmio_properties;
/* QOM registration record for the virtio-mmio proxy device. */
static const TypeInfo virtio_mmio_info = {
    .name = TYPE_VIRTIO_MMIO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init = virtio_mmio_class_init,
};
461 /* virtio-mmio-bus. */
463 static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
465 BusState *virtio_mmio_bus;
466 VirtIOMMIOProxy *virtio_mmio_proxy;
467 char *proxy_path;
468 SysBusDevice *proxy_sbd;
469 char *path;
471 virtio_mmio_bus = qdev_get_parent_bus(dev);
472 virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
473 proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));
476 * If @format_transport_address is false, then we just perform the same as
477 * virtio_bus_get_dev_path(): we delegate the address formatting for the
478 * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
479 * (i.e., the device that implements the virtio-mmio bus) resides on. In
480 * this case the base address of the virtio-mmio transport will be
481 * invisible.
483 if (!virtio_mmio_proxy->format_transport_address) {
484 return proxy_path;
487 /* Otherwise, we append the base address of the transport. */
488 proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
489 assert(proxy_sbd->num_mmio == 1);
490 assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);
492 if (proxy_path) {
493 path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
494 proxy_sbd->mmio[0].addr);
495 } else {
496 path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
497 proxy_sbd->mmio[0].addr);
499 g_free(proxy_path);
500 return path;
503 static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
505 BusClass *bus_class = BUS_CLASS(klass);
506 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
508 k->notify = virtio_mmio_update_irq;
509 k->save_config = virtio_mmio_save_config;
510 k->load_config = virtio_mmio_load_config;
511 k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
512 k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
513 k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
514 k->has_variable_vring_alignment = true;
515 bus_class->max_dev = 1;
516 bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
/* QOM registration record for the virtio-mmio bus type. */
static const TypeInfo virtio_mmio_bus_info = {
    .name = TYPE_VIRTIO_MMIO_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init = virtio_mmio_bus_class_init,
};
/* Register both the bus and the proxy device types with QOM at startup. */
static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)