hw/vfio/platform.c
/*
 * vfio based device assignment support - platform devices
 *
 * Copyright Linaro Limited, 2014
 *
 * Authors:
 *  Kim Phillips <kim.phillips@linaro.org>
 *  Eric Auger <eric.auger@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on vfio based PCI device assignment support:
 *  Copyright Red Hat, Inc. 2012
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-platform.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/irq.h"
#include "hw/platform-bus.h"
#include "hw/qdev-properties.h"
#include "sysemu/kvm.h"
/*
 * Functions used whatever the injection method
 */

static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
{
    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
}
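/*
 * An automasked (VFIO_IRQ_INFO_AUTOMASKED) IRQ is masked by the host kernel
 * as soon as it fires, which is the usual case for level-sensitive
 * interrupts. It must be unmasked again, either explicitly with
 * vfio_unmask_single_irqindex() in the EOI path below, or through the
 * resample eventfd (intp->unmask) when irqfd acceleration is in use.
 */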
/**
 * vfio_init_intp - allocate, initialize the IRQ struct pointer
 * and add it into the list of IRQs
 * @vbasedev: the VFIO device handle
 * @info: irq info struct retrieved from VFIO driver
 * @errp: error object
 */
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
                                struct vfio_irq_info info, Error **errp)
{
    int ret;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
    VFIOINTp *intp;

    intp = g_malloc0(sizeof(*intp));
    intp->vdev = vdev;
    intp->pin = info.index;
    intp->flags = info.flags;
    intp->state = VFIO_IRQ_INACTIVE;
    intp->kvm_accel = false;

    sysbus_init_irq(sbdev, &intp->qemuirq);

    /* Get an eventfd for trigger */
    intp->interrupt = g_malloc0(sizeof(EventNotifier));
    ret = event_notifier_init(intp->interrupt, 0);
    if (ret) {
        g_free(intp->interrupt);
        g_free(intp);
        error_setg_errno(errp, -ret,
                         "failed to initialize trigger eventfd notifier");
        return NULL;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* Get an eventfd for resample/unmask */
        intp->unmask = g_malloc0(sizeof(EventNotifier));
        ret = event_notifier_init(intp->unmask, 0);
        if (ret) {
            g_free(intp->interrupt);
            g_free(intp->unmask);
            g_free(intp);
            error_setg_errno(errp, -ret,
                             "failed to initialize resample eventfd notifier");
            return NULL;
        }
    }

    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
    return intp;
}
/**
 * vfio_set_trigger_eventfd - set VFIO eventfd handling
 *
 * @intp: IRQ struct handle
 * @handler: handler to be called on eventfd signaling
 *
 * Set up VFIO signaling and attach an optional user-side handler
 * to the eventfd.
 */
static int vfio_set_trigger_eventfd(VFIOINTp *intp,
                                    eventfd_user_side_handler_t handler)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    int32_t fd = event_notifier_get_fd(intp->interrupt);
    Error *err = NULL;
    int ret;

    qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);

    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
                                 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err);
    if (ret) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
        qemu_set_fd_handler(fd, NULL, NULL, NULL);
    }

    return ret;
}
/*
 * Functions only used when eventfds are handled on user-side,
 * i.e. without irqfd
 */
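/*
 * Overview of the userspace (non-irqfd) flow implemented below:
 *
 * - the trigger eventfd is handled in QEMU (vfio_intp_interrupt);
 * - on injection the region mmaps are disabled, so subsequent guest MMIO
 *   accesses trap into QEMU (slow path);
 * - the first trapped access is assumed to reset the device IRQ status
 *   register and leads to vfio_platform_eoi, which de-asserts the virtual
 *   IRQ, unmasks the physical one and injects any pending IRQ;
 * - a timer (vfio_intp_mmap_enable) re-enables the mmaps (fast path) once
 *   no IRQ is active anymore.
 *
 * An IRQ therefore moves from VFIO_IRQ_INACTIVE to VFIO_IRQ_ACTIVE (or
 * VFIO_IRQ_PENDING while another IRQ is in flight) and back to
 * VFIO_IRQ_INACTIVE on EOI.
 */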
/**
 * vfio_mmap_set_enabled - enable/disable the fast path mode
 * @vdev: the VFIO platform device
 * @enabled: the target mmap state
 *
 * enabled = true ~ fast path = MMIO region is mmapped (no KVM TRAP);
 * enabled = false ~ slow path = MMIO region is trapped and region callbacks
 * are called; the slow path makes it possible to trap the device IRQ
 * status register reset.
 */
static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
        vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
    }
}
/**
 * vfio_intp_mmap_enable - timer function, restores the fast path
 * if there is no more active IRQ
 * @opaque: actually points to the VFIO platform device
 *
 * Called on mmap timer timeout, this function checks whether the
 * IRQ is still active and, if not, restores the fast path.
 * By construction a single eventfd is handled at a time.
 * If the IRQ is still active, the timer is re-programmed.
 */
static void vfio_intp_mmap_enable(void *opaque)
{
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;

    QEMU_LOCK_GUARD(&vdev->intp_mutex);
    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
        if (tmp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_intp_mmap_enable(tmp->pin);
            /* re-program the timer to check the active status later */
            timer_mod(vdev->mmap_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->mmap_timeout);
            return;
        }
    }
    vfio_mmap_set_enabled(vdev, true);
}
/**
 * vfio_intp_inject_pending_lockheld - injects a pending IRQ
 * @intp: the pending VFIOINTp handle
 *
 * The function is called on a previous IRQ completion, from
 * vfio_platform_eoi, while the intp_mutex is held.
 * In that situation the slow path is already set and the
 * mmap timer was already programmed.
 */
static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
{
    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    intp->state = VFIO_IRQ_ACTIVE;

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);
}
/**
 * vfio_intp_interrupt - the user-side eventfd handler
 * @intp: the VFIOINTp handle, registered as the eventfd opaque pointer
 *
 * The function is entered in event handler context:
 * the vIRQ is injected into the guest if there is no other active
 * or pending IRQ.
 */
static void vfio_intp_interrupt(VFIOINTp *intp)
{
    int ret;
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = intp->vdev;
    bool delay_handling = false;

    QEMU_LOCK_GUARD(&vdev->intp_mutex);
    if (intp->state == VFIO_IRQ_INACTIVE) {
        QLIST_FOREACH(tmp, &vdev->intp_list, next) {
            if (tmp->state == VFIO_IRQ_ACTIVE ||
                tmp->state == VFIO_IRQ_PENDING) {
                delay_handling = true;
                break;
            }
        }
    }
    if (delay_handling) {
        /*
         * the new IRQ gets a pending status and is pushed in
         * the pending queue
         */
        intp->state = VFIO_IRQ_PENDING;
        trace_vfio_intp_interrupt_set_pending(intp->pin);
        QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                             intp, pqnext);
        event_notifier_test_and_clear(intp->interrupt);
        return;
    }

    trace_vfio_platform_intp_interrupt(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    ret = event_notifier_test_and_clear(intp->interrupt);
    if (!ret) {
        error_report("Error when clearing fd=%d (ret = %d)",
                     event_notifier_get_fd(intp->interrupt), ret);
    }

    intp->state = VFIO_IRQ_ACTIVE;

    /* sets the slow path */
    vfio_mmap_set_enabled(vdev, false);

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);

    /*
     * Schedule the mmap timer which will restore the fast path when
     * no IRQ is active anymore
     */
    if (vdev->mmap_timeout) {
        timer_mod(vdev->mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  vdev->mmap_timeout);
    }
}
/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level-sensitive IRQs auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ if any.
 * The eoi function is called on the first access to any MMIO region
 * after an IRQ was triggered, trapped since the slow path was set.
 * It is assumed this access corresponds to the IRQ status
 * register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    QEMU_LOCK_GUARD(&vdev->intp_mutex);
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmask the physical level-sensitive IRQ */
                vfio_unmask_single_irqindex(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, handle the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
}
/**
 * vfio_start_eventfd_injection - starts the virtual IRQ injection using
 * user-side handled eventfds
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 */
static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (vfio_set_trigger_eventfd(intp, vfio_intp_interrupt)) {
        abort();
    }
}
/*
 * Functions used for irqfd
 */
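/*
 * With irqfd, the trigger eventfd is consumed directly by KVM
 * (kvm_irqchip_add_irqfd_notifier) and, for automasked IRQs, the resample
 * eventfd lets the VFIO driver unmask the physical IRQ when the guest
 * completes it, so no userspace round trip is needed on the IRQ path.
 */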
/**
 * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
 * @intp: the IRQ struct handle
 *
 * Programs the VFIO driver to unmask this IRQ when the
 * intp->unmask eventfd is triggered.
 */
static int vfio_set_resample_eventfd(VFIOINTp *intp)
{
    int32_t fd = event_notifier_get_fd(intp->unmask);
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    Error *err = NULL;
    int ret;

    qemu_set_fd_handler(fd, NULL, NULL, NULL);
    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
                                 VFIO_IRQ_SET_ACTION_UNMASK, fd, &err);
    if (ret) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
    }
    return ret;
}
/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * In case the irqfd setup fails, we fall back to userspace handled eventfds.
 */
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                       intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt),
                                    event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    abort();
fail_irqfd:
    vfio_start_eventfd_injection(sbdev, irq);
    return;
}
/* VFIO skeleton */

static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}

/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    return -1;
}
/**
 * vfio_populate_device - Allocate and populate MMIO region
 * and IRQ structs according to driver returned information
 * @vbasedev: the VFIO device handle
 * @errp: error object
 */
static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
{
    VFIOINTp *intp, *tmp;
    int i, ret = -1;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
        error_setg(errp, "this isn't a platform device");
        return ret;
    }

    vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);

    for (i = 0; i < vbasedev->num_regions; i++) {
        char *name = g_strdup_printf("VFIO %s region %d\n", vbasedev->name, i);

        vdev->regions[i] = g_new0(VFIORegion, 1);
        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                vdev->regions[i], i, name);
        g_free(name);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            goto reg_error;
        }
    }

    vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                    vfio_intp_mmap_enable, vdev);

    QSIMPLEQ_INIT(&vdev->pending_intp_queue);

    for (i = 0; i < vbasedev->num_irqs; i++) {
        struct vfio_irq_info irq = { .argsz = sizeof(irq) };

        irq.index = i;
        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get device irq info");
            goto irq_err;
        } else {
            trace_vfio_platform_populate_interrupts(irq.index,
                                                    irq.count,
                                                    irq.flags);
            intp = vfio_init_intp(vbasedev, irq, errp);
            if (!intp) {
                ret = -1;
                goto irq_err;
            }
        }
    }
    return 0;
irq_err:
    timer_del(vdev->mmap_timer);
    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
        QLIST_REMOVE(intp, next);
        g_free(intp);
    }
reg_error:
    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vdev->regions[i]) {
            vfio_region_finalize(vdev->regions[i]);
        }
        g_free(vdev->regions[i]);
    }
    g_free(vdev->regions);
    return ret;
}
/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    .vfio_eoi = vfio_platform_eoi,
};
/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 * @errp: error object
 *
 * Implement the VFIO command sequence that discovers the
 * assigned device resources: group extraction, device
 * fd retrieval, resource query.
 * Precondition: the device name must be initialized.
 */
static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
    VFIOGroup *group;
    VFIODevice *vbasedev_iter;
    char *tmp, group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* @sysfsdev takes precedence over @host */
    if (vbasedev->sysfsdev) {
        g_free(vbasedev->name);
        vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
    } else {
        if (!vbasedev->name || strchr(vbasedev->name, '/')) {
            error_setg(errp, "wrong host device name");
            return -EINVAL;
        }

        vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                             vbasedev->name);
    }

    if (stat(vbasedev->sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno,
                         "failed to get the sysfs host device file status");
        return -errno;
    }

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len < 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }

    trace_vfio_platform_base_device_init(vbasedev->name, groupid);

    group = vfio_get_group(groupid, &address_space_memory, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
    }

    return ret;
}
/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error
 *
 * Initialize the device, its memory regions and IRQ structures.
 * IRQs are started separately.
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;

    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
    vbasedev->dev = dev;
    vbasedev->ops = &vfio_platform_ops;

    qemu_mutex_init(&vdev->intp_mutex);

    trace_vfio_platform_realize(vbasedev->sysfsdev ?
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);

    ret = vfio_base_device_init(vbasedev, errp);
    if (ret) {
        goto out;
    }

    if (!vdev->compat) {
        GError *gerr = NULL;
        gchar *contents;
        gsize length;
        char *path;

        path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev);
        if (!g_file_get_contents(path, &contents, &length, &gerr)) {
            error_setg(errp, "%s", gerr->message);
            g_error_free(gerr);
            g_free(path);
            return;
        }
        g_free(path);
        vdev->compat = contents;
        for (vdev->num_compat = 0; length; vdev->num_compat++) {
            size_t skip = strlen(contents) + 1;
            contents += skip;
            length -= skip;
        }
    }

    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vfio_region_mmap(vdev->regions[i])) {
            warn_report("%s mmap unsupported, performance may be slow",
                        memory_region_name(vdev->regions[i]->mem));
        }
        sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
    }
out:
    if (!ret) {
        return;
    }

    if (vdev->vbasedev.name) {
        error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    } else {
        error_prepend(errp, "vfio error: ");
    }
}
static const VMStateDescription vfio_platform_vmstate = {
    .name = "vfio-platform",
    .unmigratable = 1,
};
static Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
    DEFINE_PROP_END_OF_LIST(),
};
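/*
 * Illustrative command line use of the properties above (the host device
 * name below is made up; it must refer to a platform device already bound
 * to the vfio-platform kernel driver, on a machine that accepts dynamic
 * sysbus devices such as the arm virt machine):
 *
 *   -device vfio-platform,host=fff51000.ehci,mmap-timeout-ms=1100
 */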
static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    device_class_set_props(dc, vfio_platform_dev_properties);
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    /* Supported by TYPE_VIRT_MACHINE */
    dc->user_creatable = true;
}
static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
};
static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)