/*
 * vfio based device assignment support - platform devices
 *
 * Copyright Linaro Limited, 2014
 *
 * Authors:
 *  Kim Phillips <kim.phillips@linaro.org>
 *  Eric Auger <eric.auger@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on vfio based PCI device assignment support:
 *  Copyright Red Hat, Inc. 2012
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-platform.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "exec/memory.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/platform-bus.h"
#include "sysemu/kvm.h"
/*
 * Functions used regardless of the injection method
 */

static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
{
    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
}
/**
 * vfio_init_intp - allocate, initialize the IRQ struct pointer
 * and add it into the list of IRQs
 * @vbasedev: the VFIO device handle
 * @info: irq info struct retrieved from VFIO driver
 * @errp: error object
 */
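/*
 * Each VFIOINTp allocated below owns up to three resources: a trigger
 * EventNotifier signaled by the VFIO driver when the physical IRQ fires, an
 * unmask (resample) EventNotifier allocated only for IRQs reported as
 * VFIO_IRQ_INFO_AUTOMASKED (level-sensitive IRQs masked by the driver on
 * delivery), and a qemu_irq registered with sysbus_init_irq() that is later
 * wired to the virtual interrupt controller.
 */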
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
                                struct vfio_irq_info info, Error **errp)
{
    int ret;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
    VFIOINTp *intp;

    intp = g_malloc0(sizeof(*intp));
    intp->vdev = vdev;
    intp->pin = info.index;
    intp->flags = info.flags;
    intp->state = VFIO_IRQ_INACTIVE;
    intp->kvm_accel = false;

    sysbus_init_irq(sbdev, &intp->qemuirq);

    /* Get an eventfd for trigger */
    intp->interrupt = g_malloc0(sizeof(EventNotifier));
    ret = event_notifier_init(intp->interrupt, 0);
    if (ret) {
        g_free(intp->interrupt);
        g_free(intp);
        error_setg_errno(errp, -ret,
                         "failed to initialize trigger eventfd notifier");
        return NULL;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* Get an eventfd for resample/unmask */
        intp->unmask = g_malloc0(sizeof(EventNotifier));
        ret = event_notifier_init(intp->unmask, 0);
        if (ret) {
            g_free(intp->interrupt);
            g_free(intp->unmask);
            g_free(intp);
            error_setg_errno(errp, -ret,
                             "failed to initialize resample eventfd notifier");
            return NULL;
        }
    }

    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
    return intp;
}
/**
 * vfio_set_trigger_eventfd - set VFIO eventfd handling
 *
 * @intp: IRQ struct handle
 * @handler: handler to be called on eventfd signaling
 *
 * Setup VFIO signaling and attach an optional user-side handler
 * to the eventfd
 */
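/*
 * The VFIO_DEVICE_SET_IRQS argument built below is a variable-sized buffer:
 * a struct vfio_irq_set header followed by a single int32_t slot carrying
 * the eventfd (argsz = sizeof(*irq_set) + sizeof(*pfd)). With
 * VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER and count = 1,
 * the VFIO driver signals that eventfd whenever IRQ index intp->pin fires.
 */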
static int vfio_set_trigger_eventfd(VFIOINTp *intp,
                                    eventfd_user_side_handler_t handler)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    struct vfio_irq_set *irq_set;
    int argsz, ret;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);
    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = intp->pin;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = event_notifier_get_fd(intp->interrupt);
    qemu_set_fd_handler(*pfd, (IOHandler *)handler, NULL, intp);
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret < 0) {
        error_report("vfio: Failed to set trigger eventfd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, NULL);
    }
    g_free(irq_set);
    return ret;
}
/*
 * Functions only used when eventfds are handled on user-side,
 * i.e. without irqfd
 */
/**
 * vfio_mmap_set_enabled - enable/disable the fast path mode
 * @vdev: the VFIO platform device
 * @enabled: the target mmap state
 *
 * enabled = true ~ fast path = the MMIO region is mmapped (no KVM trap);
 * enabled = false ~ slow path = the MMIO region is trapped and the region
 * callbacks are called; the slow path makes it possible to trap the
 * device IRQ status register reset.
 */
static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
        vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
    }
}
/**
 * vfio_intp_mmap_enable - timer function, restores the fast path
 * if there is no more active IRQ
 * @opaque: actually points to the VFIO platform device
 *
 * Called on mmap timer timeout, this function checks whether the
 * IRQ is still active and if not, restores the fast path.
 * By construction a single eventfd is handled at a time.
 * If the IRQ is still active, the timer is reprogrammed.
 */
static void vfio_intp_mmap_enable(void *opaque)
{
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
        if (tmp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_intp_mmap_enable(tmp->pin);
            /* re-program the timer to check active status later */
            timer_mod(vdev->mmap_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                          vdev->mmap_timeout);
            qemu_mutex_unlock(&vdev->intp_mutex);
            return;
        }
    }
    vfio_mmap_set_enabled(vdev, true);
    qemu_mutex_unlock(&vdev->intp_mutex);
}
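/*
 * Userspace (non-irqfd) injection sequence: vfio_intp_interrupt() below
 * switches the regions to the slow (trapped) path, injects the vIRQ and
 * arms mmap_timer; the first trapped MMIO access, assumed to be the IRQ
 * status register reset, leads to vfio_platform_eoi(); once the timer
 * fires with no IRQ left in the VFIO_IRQ_ACTIVE state,
 * vfio_intp_mmap_enable() above restores the mmap fast path.
 */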
/**
 * vfio_intp_inject_pending_lockheld - injects a pending IRQ
 * @intp: the pending VFIOINTp handle
 *
 * The function is called on a previous IRQ completion, from
 * vfio_platform_eoi, while the intp_mutex is locked.
 * In that situation the slow path is already set and
 * the mmap timer was already programmed.
 */
static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
{
    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    intp->state = VFIO_IRQ_ACTIVE;

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);
}
/**
 * vfio_intp_interrupt - the user-side eventfd handler
 * @intp: the VFIOINTp handle (registered as the fd handler opaque)
 *
 * The function is entered in event handler context:
 * the vIRQ is injected into the guest if there is no other active
 * or pending IRQ.
 */
static void vfio_intp_interrupt(VFIOINTp *intp)
{
    int ret;
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = intp->vdev;
    bool delay_handling = false;

    qemu_mutex_lock(&vdev->intp_mutex);
    if (intp->state == VFIO_IRQ_INACTIVE) {
        QLIST_FOREACH(tmp, &vdev->intp_list, next) {
            if (tmp->state == VFIO_IRQ_ACTIVE ||
                tmp->state == VFIO_IRQ_PENDING) {
                delay_handling = true;
                break;
            }
        }
    }
    if (delay_handling) {
        /*
         * the new IRQ gets a pending status and is pushed in
         * the pending queue
         */
        intp->state = VFIO_IRQ_PENDING;
        trace_vfio_intp_interrupt_set_pending(intp->pin);
        QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                             intp, pqnext);
        ret = event_notifier_test_and_clear(intp->interrupt);
        qemu_mutex_unlock(&vdev->intp_mutex);
        return;
    }

    trace_vfio_platform_intp_interrupt(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    ret = event_notifier_test_and_clear(intp->interrupt);
    if (!ret) {
        error_report("Error when clearing fd=%d (ret = %d)",
                     event_notifier_get_fd(intp->interrupt), ret);
    }

    intp->state = VFIO_IRQ_ACTIVE;

    /* sets slow path */
    vfio_mmap_set_enabled(vdev, false);

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);

    /*
     * Schedule the mmap timer which will restore fastpath when no IRQ
     * is active anymore
     */
    if (vdev->mmap_timeout) {
        timer_mod(vdev->mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->mmap_timeout);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}
/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level sensitive IRQs auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ if any.
 * The eoi function is called on the first access to any MMIO region
 * after an IRQ was triggered; that access is trapped since the slow
 * path was set. It is assumed this access corresponds to the IRQ status
 * register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmasks the physical level-sensitive IRQ */
                vfio_unmask_single_irqindex(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, handle the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}
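/*
 * Per-IRQ state machine of the userspace eventfd path:
 *
 *  INACTIVE --eventfd, no other active/pending IRQ--> ACTIVE
 *  INACTIVE --eventfd, another IRQ active/pending---> PENDING (queued)
 *  PENDING  --eoi of the active IRQ-----------------> ACTIVE
 *  ACTIVE   --vfio_platform_eoi---------------------> INACTIVE
 *
 * (states are the VFIO_IRQ_* values used by vfio_intp_interrupt() and
 * vfio_platform_eoi() above)
 */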
/**
 * vfio_start_eventfd_injection - starts the virtual IRQ injection using
 * user-side handled eventfds
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 */
static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    int ret;
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    ret = vfio_set_trigger_eventfd(intp, vfio_intp_interrupt);
    if (ret) {
        error_report("vfio: failed to start eventfd signaling for IRQ %d: %m",
                     intp->pin);
        abort();
    }
}
/*
 * Functions used for irqfd
 */
/**
 * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
 * @intp: the IRQ struct handle
 *
 * Programs the VFIO driver to unmask this IRQ when the
 * intp->unmask eventfd is triggered
 */
static int vfio_set_resample_eventfd(VFIOINTp *intp)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    struct vfio_irq_set *irq_set;
    int argsz, ret;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);
    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = intp->pin;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = event_notifier_get_fd(intp->unmask);
    qemu_set_fd_handler(*pfd, NULL, NULL, NULL);
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret < 0) {
        error_report("vfio: Failed to set resample eventfd: %m");
    }
    return ret;
}
/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * If the irqfd setup fails, we fall back to userspace-handled eventfds
 */
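/*
 * With irqfd acceleration the trigger eventfd is consumed in the kernel:
 * kvm_irqchip_add_irqfd_notifier() routes intp->interrupt straight to the
 * qemu_irq (vfio_set_trigger_eventfd() is then called with a NULL user-side
 * handler), and for automasked IRQs intp->unmask serves both as the KVM
 * resamplefd and, through vfio_set_resample_eventfd(), as the VFIO unmask
 * eventfd, so the physical IRQ is unmasked once the guest completes the
 * virtual one.
 */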
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                       intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt),
                                    event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    error_report("vfio: failed to start eventfd signaling for IRQ %d: %m",
                 intp->pin);
    abort();
fail_irqfd:
    vfio_start_eventfd_injection(sbdev, irq);
    return;
}
/* VFIO skeleton */
static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}
/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    return -1;
}
/**
 * vfio_populate_device - Allocate and populate MMIO region
 * and IRQ structs according to driver returned information
 * @vbasedev: the VFIO device handle
 * @errp: error object
 */
static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
{
    VFIOINTp *intp, *tmp;
    int i, ret = -1;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
        error_setg(errp, "this isn't a platform device");
        return ret;
    }

    vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);

    for (i = 0; i < vbasedev->num_regions; i++) {
        char *name = g_strdup_printf("VFIO %s region %d", vbasedev->name, i);

        vdev->regions[i] = g_new0(VFIORegion, 1);
        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                vdev->regions[i], i, name);
        g_free(name);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            goto reg_error;
        }
    }

    vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                    vfio_intp_mmap_enable, vdev);

    QSIMPLEQ_INIT(&vdev->pending_intp_queue);

    for (i = 0; i < vbasedev->num_irqs; i++) {
        struct vfio_irq_info irq = { .argsz = sizeof(irq) };

        irq.index = i;
        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
        if (ret) {
            error_setg_errno(errp, -ret, "failed to get device irq info");
            goto irq_err;
        } else {
            trace_vfio_platform_populate_interrupts(irq.index,
                                                    irq.count,
                                                    irq.flags);
            intp = vfio_init_intp(vbasedev, irq, errp);
            if (!intp) {
                ret = -1;
                goto irq_err;
            }
        }
    }
    return 0;
irq_err:
    timer_del(vdev->mmap_timer);
    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
        QLIST_REMOVE(intp, next);
        g_free(intp);
    }
reg_error:
    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vdev->regions[i]) {
            vfio_region_finalize(vdev->regions[i]);
        }
        g_free(vdev->regions[i]);
    }
    g_free(vdev->regions);
    return ret;
}
/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    .vfio_eoi = vfio_platform_eoi,
};
/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 * @errp: error object
 *
 * Implement the VFIO command sequence that makes it possible to discover
 * the assigned device resources: group extraction, device fd retrieval,
 * resource query.
 * Precondition: the device name must be initialized
 */
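/*
 * Illustration (hypothetical paths, not taken from this source): with
 * host=fff51000.ethernet the code below stats
 * /sys/bus/platform/devices/fff51000.ethernet, follows its iommu_group
 * symlink (e.g. ../../../../kernel/iommu_groups/3) to extract groupid 3,
 * opens that VFIO group and the device within it, and finally queries
 * regions and IRQs through vfio_populate_device().
 */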
static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
    VFIOGroup *group;
    VFIODevice *vbasedev_iter;
    char *tmp, group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* @sysfsdev takes precedence over @host */
    if (vbasedev->sysfsdev) {
        g_free(vbasedev->name);
        vbasedev->name = g_strdup(basename(vbasedev->sysfsdev));
    } else {
        if (!vbasedev->name || strchr(vbasedev->name, '/')) {
            error_setg(errp, "wrong host device name");
            return -EINVAL;
        }

        vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                             vbasedev->name);
    }

    if (stat(vbasedev->sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno,
                         "failed to get the sysfs host device file status");
        return -errno;
    }

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len < 0 || len >= sizeof(group_path)) {
        ret = len < 0 ? -errno : -ENAMETOOLONG;
        error_setg_errno(errp, -ret, "no iommu_group found");
        return ret;
    }

    group_path[len] = 0;

    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        return -errno;
    }

    trace_vfio_platform_base_device_init(vbasedev->name, groupid);

    group = vfio_get_group(groupid, &address_space_memory, errp);
    if (!group) {
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
    }

    return ret;
}
/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error
 *
 * Initialize the device, its memory regions and IRQ structures.
 * IRQs are started separately.
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;

    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
    vbasedev->ops = &vfio_platform_ops;

    trace_vfio_platform_realize(vbasedev->sysfsdev ?
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);

    ret = vfio_base_device_init(vbasedev, errp);
    if (ret) {
        goto out;
    }

    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vfio_region_mmap(vdev->regions[i])) {
            error_report("%s mmap unsupported. Performance may be slow",
                         memory_region_name(vdev->regions[i]->mem));
        }
        sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
    }
out:
    if (!ret) {
        return;
    }

    if (vdev->vbasedev.name) {
        error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name);
    } else {
        error_prepend(errp, "vfio error: ");
    }
}
static const VMStateDescription vfio_platform_vmstate = {
    .name = TYPE_VFIO_PLATFORM,
    .unmigratable = 1,
};
static Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
    DEFINE_PROP_END_OF_LIST(),
};
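/*
 * Example usage (illustrative; the host device name is hypothetical). Since
 * TYPE_VFIO_PLATFORM is abstract (see the TypeInfo below), a derived type
 * such as "vfio-calxeda-xgmac" is what actually gets instantiated:
 *
 *   -device vfio-calxeda-xgmac,host=fff51000.ethernet
 *   -device vfio-calxeda-xgmac,sysfsdev=/sys/bus/platform/devices/fff51000.ethernet
 */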
static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    dc->props = vfio_platform_dev_properties;
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
    .abstract = true,
};
static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)