/**
 * QEMU vfio-user-server server object
 *
 * Copyright © 2022 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/**
 * Usage: add options:
 *     -machine x-remote,vfio-user=on,auto-shutdown=on
 *     -device <PCI-device>,id=<pci-dev-id>
 *     -object x-vfio-user-server,id=<id>,type=unix,path=<socket-path>,
 *             device=<pci-dev-id>
 *
 * Note that the x-vfio-user-server object must be used with the x-remote
 * machine only. Only PCI devices are supported for now.
 *
 * type - SocketAddress type - presently only "unix" is supported. Required
 *        option.
 *
 * path - named unix socket; it will be created by the server. Required
 *        option.
 *
 * device - id of a device on the server. Required option; only PCI devices
 *          are presently supported.
 *
 * notes - x-vfio-user-server could block IO and the monitor during the
 *         initialization phase.
 */
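/*
 * Illustrative invocation (the device, ids and socket path below are only
 * examples - any PCI device instantiated on the x-remote machine can be
 * exported):
 *
 *     qemu-system-x86_64 -machine x-remote,vfio-user=on,auto-shutdown=on \
 *         -device lsi53c895a,id=lsi1 \
 *         -object x-vfio-user-server,id=vfuobj1,type=unix,\
 *                 path=/tmp/vfio-user.sock,device=lsi1
 */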
#include "qemu/osdep.h"

#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "sysemu/runstate.h"
#include "hw/boards.h"
#include "hw/remote/machine.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-events-misc.h"
#include "qemu/notify.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "libvfio-user.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "qemu/timer.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/remote/vfio-user-obj.h"
#define TYPE_VFU_OBJECT "x-vfio-user-server"
OBJECT_DECLARE_TYPE(VfuObject, VfuObjectClass, VFU_OBJECT)
/**
 * VFU_OBJECT_ERROR - reports an error message. If auto_shutdown
 * is set, it aborts the machine on error. Otherwise, it logs an
 * error message without aborting.
 */
#define VFU_OBJECT_ERROR(o, fmt, ...)                          \
    {                                                          \
        if (vfu_object_auto_shutdown()) {                      \
            error_setg(&error_abort, (fmt), ## __VA_ARGS__);   \
        } else {                                               \
            error_report((fmt), ## __VA_ARGS__);               \
        }                                                      \
    }
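/*
 * Typical use within this file (the message text is only illustrative):
 *
 *     VFU_OBJECT_ERROR(o, "vfu: Failed to run device %s - %s",
 *                      o->device, strerror(errno));
 *
 * With auto-shutdown enabled the failure aborts via &error_abort; otherwise
 * it is only reported and the server object stays alive.
 */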
struct VfuObjectClass {
    ObjectClass parent_class;

    unsigned int nr_devs; /* number of active server objects */
};

struct VfuObject {
    /* private */
    Object parent;

    SocketAddress *socket;

    char *device;

    Error *err;

    Notifier machine_done;

    vfu_ctx_t *vfu_ctx;

    PCIDevice *pci_dev;

    Error *unplug_blocker;

    int vfu_poll_fd;

    MSITriggerFunc *default_msi_trigger;
    MSIPrepareMessageFunc *default_msi_prepare_message;
    MSIxPrepareMessageFunc *default_msix_prepare_message;
};
static void vfu_object_init_ctx(VfuObject *o, Error **errp);
static bool vfu_object_auto_shutdown(void)
{
    bool auto_shutdown = true;
    Error *local_err = NULL;

    if (!current_machine) {
        return auto_shutdown;
    }

    auto_shutdown = object_property_get_bool(OBJECT(current_machine),
                                             "auto-shutdown",
                                             &local_err);
    if (local_err) {
        /*
         * local_err would be set if no such property exists - safe to ignore.
         * Unlikely scenario as auto-shutdown is always defined for
         * TYPE_REMOTE_MACHINE, and TYPE_VFU_OBJECT only works with
         * TYPE_REMOTE_MACHINE
         */
        auto_shutdown = true;
        error_free(local_err);
    }

    return auto_shutdown;
}
static void vfu_object_set_socket(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    VfuObject *o = VFU_OBJECT(obj);

    if (o->vfu_ctx) {
        error_setg(errp, "vfu: Unable to set socket property - server busy");
        return;
    }

    qapi_free_SocketAddress(o->socket);

    o->socket = NULL;

    visit_type_SocketAddress(v, name, &o->socket, errp);

    if (o->socket->type != SOCKET_ADDRESS_TYPE_UNIX) {
        error_setg(errp, "vfu: Unsupported socket type - %s",
                   SocketAddressType_str(o->socket->type));
        qapi_free_SocketAddress(o->socket);
        o->socket = NULL;
        return;
    }

    trace_vfu_prop("socket", o->socket->u.q_unix.path);

    vfu_object_init_ctx(o, errp);
}
static void vfu_object_set_device(Object *obj, const char *str, Error **errp)
{
    VfuObject *o = VFU_OBJECT(obj);

    if (o->vfu_ctx) {
        error_setg(errp, "vfu: Unable to set device property - server busy");
        return;
    }

    g_free(o->device);

    o->device = g_strdup(str);

    trace_vfu_prop("device", str);

    vfu_object_init_ctx(o, errp);
}
static void vfu_object_ctx_run(void *opaque)
{
    VfuObject *o = opaque;
    const char *vfu_id;
    char *vfu_path, *pci_dev_path;
    int ret = -1;

    while (ret != 0) {
        ret = vfu_run_ctx(o->vfu_ctx);
        if (ret < 0) {
            if (errno == EINTR) {
                continue;
            } else if (errno == ENOTCONN) {
                vfu_id = object_get_canonical_path_component(OBJECT(o));
                vfu_path = object_get_canonical_path(OBJECT(o));
                g_assert(o->pci_dev);
                pci_dev_path = object_get_canonical_path(OBJECT(o->pci_dev));
                /* o->device is a required property and is non-NULL here */
                qapi_event_send_vfu_client_hangup(vfu_id, vfu_path,
                                                  o->device, pci_dev_path);
                qemu_set_fd_handler(o->vfu_poll_fd, NULL, NULL, NULL);
                o->vfu_poll_fd = -1;
                object_unparent(OBJECT(o));
                g_free(vfu_path);
                g_free(pci_dev_path);
                break;
            } else {
                VFU_OBJECT_ERROR(o, "vfu: Failed to run device %s - %s",
                                 o->device, strerror(errno));
                break;
            }
        }
    }
}
static void vfu_object_attach_ctx(void *opaque)
{
    VfuObject *o = opaque;
    GPollFD pfds[1];
    int ret;

    qemu_set_fd_handler(o->vfu_poll_fd, NULL, NULL, NULL);

    pfds[0].fd = o->vfu_poll_fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

retry_attach:
    ret = vfu_attach_ctx(o->vfu_ctx);
    if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
        /**
         * vfu_object_attach_ctx can block QEMU's main loop
         * during attach - the monitor and other IO
         * could be unresponsive during this time.
         */
        (void)qemu_poll_ns(pfds, 1, 500 * (int64_t)SCALE_MS);
        goto retry_attach;
    } else if (ret < 0) {
        VFU_OBJECT_ERROR(o, "vfu: Failed to attach device %s to context - %s",
                         o->device, strerror(errno));
        return;
    }

    o->vfu_poll_fd = vfu_get_poll_fd(o->vfu_ctx);
    if (o->vfu_poll_fd < 0) {
        VFU_OBJECT_ERROR(o, "vfu: Failed to get poll fd %s", o->device);
        return;
    }

    qemu_set_fd_handler(o->vfu_poll_fd, vfu_object_ctx_run, NULL, o);
}
static ssize_t vfu_object_cfg_access(vfu_ctx_t *vfu_ctx, char * const buf,
                                     size_t count, loff_t offset,
                                     const bool is_write)
{
    VfuObject *o = vfu_get_private(vfu_ctx);
    uint32_t pci_access_width = sizeof(uint32_t);
    size_t bytes = count;
    uint32_t val = 0;
    char *ptr = buf;
    int len;

    /*
     * Writes to the BAR registers would trigger an update to the
     * global Memory and IO AddressSpaces. But the remote device
     * never uses the global AddressSpaces, therefore overlapping
     * memory regions are not a problem
     */
    while (bytes > 0) {
        len = (bytes > pci_access_width) ? pci_access_width : bytes;
        if (is_write) {
            memcpy(&val, ptr, len);
            pci_host_config_write_common(o->pci_dev, offset,
                                         pci_config_size(o->pci_dev),
                                         val, len);
            trace_vfu_cfg_write(offset, val);
        } else {
            val = pci_host_config_read_common(o->pci_dev, offset,
                                              pci_config_size(o->pci_dev),
                                              len);
            memcpy(ptr, &val, len);
            trace_vfu_cfg_read(offset, val);
        }
        offset += len;
        ptr += len;
        bytes -= len;
    }

    return count;
}
static void dma_register(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info)
{
    VfuObject *o = vfu_get_private(vfu_ctx);
    AddressSpace *dma_as = NULL;
    MemoryRegion *subregion = NULL;
    g_autofree char *name = NULL;
    struct iovec *iov = &info->iova;

    if (!info->vaddr) {
        return;
    }

    name = g_strdup_printf("mem-%s-%"PRIx64"", o->device,
                           (uint64_t)info->vaddr);

    subregion = g_new0(MemoryRegion, 1);

    memory_region_init_ram_ptr(subregion, NULL, name,
                               iov->iov_len, info->vaddr);

    dma_as = pci_device_iommu_address_space(o->pci_dev);

    memory_region_add_subregion(dma_as->root, (hwaddr)iov->iov_base, subregion);

    trace_vfu_dma_register((uint64_t)iov->iov_base, iov->iov_len);
}
static void dma_unregister(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info)
{
    VfuObject *o = vfu_get_private(vfu_ctx);
    AddressSpace *dma_as = NULL;
    MemoryRegion *mr = NULL;
    ram_addr_t offset;

    mr = memory_region_from_host(info->vaddr, &offset);
    if (!mr) {
        return;
    }

    dma_as = pci_device_iommu_address_space(o->pci_dev);

    memory_region_del_subregion(dma_as->root, mr);

    object_unparent((OBJECT(mr)));

    trace_vfu_dma_unregister((uint64_t)info->iova.iov_base);
}
static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset,
                            hwaddr size, const bool is_write)
{
    uint8_t *ptr = buf;
    bool release_lock = false;
    uint8_t *ram_ptr = NULL;
    MemTxResult result;
    int access_size;
    uint64_t val;

    if (memory_access_is_direct(mr, is_write)) {
        /**
         * Some devices expose a PCI expansion ROM, which could be buffer
         * based as compared to other regions which are primarily based on
         * MemoryRegionOps. memory_region_find() would already check
         * for buffer overflow, we don't need to repeat it here.
         */
        ram_ptr = memory_region_get_ram_ptr(mr);

        if (is_write) {
            memcpy((ram_ptr + offset), buf, size);
        } else {
            memcpy(buf, (ram_ptr + offset), size);
        }

        return 0;
    }

    while (size) {
        /**
         * The read/write logic used below is similar to the ones in
         * flatview_read/write_continue()
         */
        release_lock = prepare_mmio_access(mr);

        access_size = memory_access_size(mr, size, offset);

        if (is_write) {
            val = ldn_he_p(ptr, access_size);

            result = memory_region_dispatch_write(mr, offset, val,
                                                  size_memop(access_size),
                                                  MEMTXATTRS_UNSPECIFIED);
        } else {
            result = memory_region_dispatch_read(mr, offset, &val,
                                                 size_memop(access_size),
                                                 MEMTXATTRS_UNSPECIFIED);

            stn_he_p(ptr, access_size, val);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        if (result != MEMTX_OK) {
            return -1;
        }

        size -= access_size;
        ptr += access_size;
        offset += access_size;
    }

    return 0;
}
static size_t vfu_object_bar_rw(PCIDevice *pci_dev, int pci_bar,
                                hwaddr bar_offset, char * const buf,
                                hwaddr len, const bool is_write)
{
    MemoryRegionSection section = { 0 };
    uint8_t *ptr = (uint8_t *)buf;
    MemoryRegion *section_mr = NULL;
    uint64_t section_size;
    hwaddr section_offset;
    size_t size = 0;

    while (len) {
        section = memory_region_find(pci_dev->io_regions[pci_bar].memory,
                                     bar_offset, len);

        if (!section.mr) {
            warn_report("vfu: invalid address 0x%"PRIx64"", bar_offset);
            return size;
        }

        section_mr = section.mr;
        section_offset = section.offset_within_region;
        section_size = int128_get64(section.size);

        if (is_write && section_mr->readonly) {
            warn_report("vfu: attempting to write to readonly region in "
                        "bar %d - [0x%"PRIx64" - 0x%"PRIx64"]",
                        pci_bar, bar_offset,
                        (bar_offset + section_size));
            memory_region_unref(section_mr);
            return size;
        }

        if (vfu_object_mr_rw(section_mr, ptr, section_offset,
                             section_size, is_write)) {
            warn_report("vfu: failed to %s "
                        "[0x%"PRIx64" - 0x%"PRIx64"] in bar %d",
                        is_write ? "write to" : "read from", bar_offset,
                        (bar_offset + section_size), pci_bar);
            memory_region_unref(section_mr);
            return size;
        }

        size += section_size;
        bar_offset += section_size;
        ptr += section_size;
        len -= section_size;

        memory_region_unref(section_mr);
    }

    return size;
}
/**
 * VFU_OBJECT_BAR_HANDLER - macro for defining handlers for PCI BARs.
 *
 * To create handler for BAR number 2, VFU_OBJECT_BAR_HANDLER(2) would
 * define vfu_object_bar2_handler
 */
#define VFU_OBJECT_BAR_HANDLER(BAR_NO)                                        \
    static ssize_t vfu_object_bar##BAR_NO##_handler(vfu_ctx_t *vfu_ctx,       \
                                        char * const buf, size_t count,       \
                                        loff_t offset, const bool is_write)   \
    {                                                                         \
        VfuObject *o = vfu_get_private(vfu_ctx);                              \
        PCIDevice *pci_dev = o->pci_dev;                                      \
                                                                              \
        return vfu_object_bar_rw(pci_dev, BAR_NO, offset,                     \
                                 buf, count, is_write);                       \
    }
VFU_OBJECT_BAR_HANDLER(0)
VFU_OBJECT_BAR_HANDLER(1)
VFU_OBJECT_BAR_HANDLER(2)
VFU_OBJECT_BAR_HANDLER(3)
VFU_OBJECT_BAR_HANDLER(4)
VFU_OBJECT_BAR_HANDLER(5)
VFU_OBJECT_BAR_HANDLER(6)
static vfu_region_access_cb_t *vfu_object_bar_handlers[PCI_NUM_REGIONS] = {
    &vfu_object_bar0_handler,
    &vfu_object_bar1_handler,
    &vfu_object_bar2_handler,
    &vfu_object_bar3_handler,
    &vfu_object_bar4_handler,
    &vfu_object_bar5_handler,
    &vfu_object_bar6_handler,
};
/**
 * vfu_object_register_bars - Identify active BAR regions of pdev and setup
 *                            callbacks to handle read/write accesses
 */
static void vfu_object_register_bars(vfu_ctx_t *vfu_ctx, PCIDevice *pdev)
{
    int flags = VFU_REGION_FLAG_RW;
    int i;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        if (!pdev->io_regions[i].size) {
            continue;
        }

        if ((i == VFU_PCI_DEV_ROM_REGION_IDX) ||
            pdev->io_regions[i].memory->readonly) {
            flags &= ~VFU_REGION_FLAG_WRITE;
        }

        vfu_setup_region(vfu_ctx, VFU_PCI_DEV_BAR0_REGION_IDX + i,
                         (size_t)pdev->io_regions[i].size,
                         vfu_object_bar_handlers[i],
                         flags, NULL, 0, -1, 0);

        trace_vfu_bar_register(i, pdev->io_regions[i].addr,
                               pdev->io_regions[i].size);
    }
}
static int vfu_object_map_irq(PCIDevice *pci_dev, int intx)
{
    int pci_bdf = PCI_BUILD_BDF(pci_bus_num(pci_get_bus(pci_dev)),
                                pci_dev->devfn);

    return pci_bdf;
}
static void vfu_object_set_irq(void *opaque, int pirq, int level)
{
    PCIBus *pci_bus = opaque;
    PCIDevice *pci_dev = NULL;
    vfu_ctx_t *vfu_ctx = NULL;
    int pci_bus_num, devfn;

    if (level) {
        pci_bus_num = PCI_BUS_NUM(pirq);
        devfn = PCI_BDF_TO_DEVFN(pirq);

        /*
         * pci_find_device() performs at O(1) if the device is attached
         * to the root PCI bus. Whereas, if the device is attached to a
         * secondary PCI bus (such as when a root port is involved),
         * finding the parent PCI bus could take O(n)
         */
        pci_dev = pci_find_device(pci_bus, pci_bus_num, devfn);

        vfu_ctx = pci_dev->irq_opaque;

        g_assert(vfu_ctx);

        vfu_irq_trigger(vfu_ctx, 0);
    }
}
static MSIMessage vfu_object_msi_prepare_msg(PCIDevice *pci_dev,
                                             unsigned int vector)
{
    MSIMessage msg;

    /* Encode only the vector; the client resolves the actual MSI route */
    msg.address = 0;
    msg.data = vector;

    return msg;
}

static void vfu_object_msi_trigger(PCIDevice *pci_dev, MSIMessage msg)
{
    vfu_ctx_t *vfu_ctx = pci_dev->irq_opaque;

    vfu_irq_trigger(vfu_ctx, msg.data);
}
static void vfu_object_setup_msi_cbs(VfuObject *o)
{
    o->default_msi_trigger = o->pci_dev->msi_trigger;
    o->default_msi_prepare_message = o->pci_dev->msi_prepare_message;
    o->default_msix_prepare_message = o->pci_dev->msix_prepare_message;

    o->pci_dev->msi_trigger = vfu_object_msi_trigger;
    o->pci_dev->msi_prepare_message = vfu_object_msi_prepare_msg;
    o->pci_dev->msix_prepare_message = vfu_object_msi_prepare_msg;
}
static void vfu_object_restore_msi_cbs(VfuObject *o)
{
    o->pci_dev->msi_trigger = o->default_msi_trigger;
    o->pci_dev->msi_prepare_message = o->default_msi_prepare_message;
    o->pci_dev->msix_prepare_message = o->default_msix_prepare_message;
}
static void vfu_msix_irq_state(vfu_ctx_t *vfu_ctx, uint32_t start,
                               uint32_t count, bool mask)
{
    VfuObject *o = vfu_get_private(vfu_ctx);
    Error *err = NULL;
    uint32_t vector;

    for (vector = start; vector < count; vector++) {
        msix_set_mask(o->pci_dev, vector, mask, &err);
        if (err) {
            VFU_OBJECT_ERROR(o, "vfu: %s: %s", o->device,
                             error_get_pretty(err));
            error_free(err);
            err = NULL;
        }
    }
}
static void vfu_msi_irq_state(vfu_ctx_t *vfu_ctx, uint32_t start,
                              uint32_t count, bool mask)
{
    VfuObject *o = vfu_get_private(vfu_ctx);
    Error *err = NULL;
    uint32_t vector;

    for (vector = start; vector < count; vector++) {
        msi_set_mask(o->pci_dev, vector, mask, &err);
        if (err) {
            VFU_OBJECT_ERROR(o, "vfu: %s: %s", o->device,
                             error_get_pretty(err));
            error_free(err);
            err = NULL;
        }
    }
}
static int vfu_object_setup_irqs(VfuObject *o, PCIDevice *pci_dev)
{
    vfu_ctx_t *vfu_ctx = o->vfu_ctx;
    int ret;

    ret = vfu_setup_device_nr_irqs(vfu_ctx, VFU_DEV_INTX_IRQ, 1);
    if (ret < 0) {
        return ret;
    }

    if (msix_nr_vectors_allocated(pci_dev)) {
        ret = vfu_setup_device_nr_irqs(vfu_ctx, VFU_DEV_MSIX_IRQ,
                                       msix_nr_vectors_allocated(pci_dev));
        vfu_setup_irq_state_callback(vfu_ctx, VFU_DEV_MSIX_IRQ,
                                     &vfu_msix_irq_state);
    } else if (msi_nr_vectors_allocated(pci_dev)) {
        ret = vfu_setup_device_nr_irqs(vfu_ctx, VFU_DEV_MSI_IRQ,
                                       msi_nr_vectors_allocated(pci_dev));
        vfu_setup_irq_state_callback(vfu_ctx, VFU_DEV_MSI_IRQ,
                                     &vfu_msi_irq_state);
    }

    if (ret < 0) {
        return ret;
    }

    vfu_object_setup_msi_cbs(o);

    pci_dev->irq_opaque = vfu_ctx;

    return 0;
}
void vfu_object_set_bus_irq(PCIBus *pci_bus)
{
    int bus_num = pci_bus_num(pci_bus);
    int max_bdf = PCI_BUILD_BDF(bus_num, PCI_DEVFN_MAX - 1);

    pci_bus_irqs(pci_bus, vfu_object_set_irq, vfu_object_map_irq, pci_bus,
                 max_bdf);
}
static int vfu_object_device_reset(vfu_ctx_t *vfu_ctx, vfu_reset_type_t type)
{
    VfuObject *o = vfu_get_private(vfu_ctx);

    /* vfu_object_ctx_run() handles lost connection */
    if (type == VFU_RESET_LOST_CONN) {
        return 0;
    }

    qdev_reset_all(DEVICE(o->pci_dev));

    return 0;
}
/*
 * TYPE_VFU_OBJECT depends on the availability of the 'socket' and 'device'
 * properties. It also depends on devices instantiated in QEMU. These
 * dependencies are not available during the instance_init phase of this
 * object's life-cycle. As such, the server is initialized after the
 * machine is setup. machine_init_done_notifier notifies TYPE_VFU_OBJECT
 * when the machine is setup, and the dependencies are available.
 */
static void vfu_object_machine_done(Notifier *notifier, void *data)
{
    VfuObject *o = container_of(notifier, VfuObject, machine_done);
    Error *err = NULL;

    vfu_object_init_ctx(o, &err);

    if (err) {
        error_propagate(&error_abort, err);
    }
}
/**
 * vfu_object_init_ctx: Create and initialize libvfio-user context. Add
 *     an unplug blocker for the associated PCI device. Setup a FD handler
 *     to process incoming messages in the context's socket.
 *
 *     The socket and device properties are mandatory, and this function
 *     will not create the context without them - the setters for these
 *     properties should call this function when the property is set. The
 *     machine should also be ready when this function is invoked - it is
 *     because QEMU objects are initialized before devices, and the
 *     associated PCI device wouldn't be available at the object
 *     initialization time. Until these conditions are satisfied, this
 *     function would return early without performing any task.
 */
static void vfu_object_init_ctx(VfuObject *o, Error **errp)
{
    DeviceState *dev = NULL;
    vfu_pci_type_t pci_type = VFU_PCI_TYPE_CONVENTIONAL;
    int ret;

    if (o->vfu_ctx || !o->socket || !o->device ||
            !phase_check(PHASE_MACHINE_READY)) {
        return;
    }

    if (o->err) {
        error_propagate(errp, o->err);
        o->err = NULL;
        return;
    }

    o->vfu_ctx = vfu_create_ctx(VFU_TRANS_SOCK, o->socket->u.q_unix.path,
                                LIBVFIO_USER_FLAG_ATTACH_NB,
                                o, VFU_DEV_TYPE_PCI);
    if (o->vfu_ctx == NULL) {
        error_setg(errp, "vfu: Failed to create context - %s", strerror(errno));
        return;
    }

    dev = qdev_find_recursive(sysbus_get_default(), o->device);
    if (dev == NULL) {
        error_setg(errp, "vfu: Device %s not found", o->device);
        goto fail;
    }

    if (!object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        error_setg(errp, "vfu: %s not a PCI device", o->device);
        goto fail;
    }

    o->pci_dev = PCI_DEVICE(dev);

    object_ref(OBJECT(o->pci_dev));

    if (pci_is_express(o->pci_dev)) {
        pci_type = VFU_PCI_TYPE_EXPRESS;
    }

    ret = vfu_pci_init(o->vfu_ctx, pci_type, PCI_HEADER_TYPE_NORMAL, 0);
    if (ret < 0) {
        error_setg(errp,
                   "vfu: Failed to attach PCI device %s to context - %s",
                   o->device, strerror(errno));
        goto fail;
    }

    error_setg(&o->unplug_blocker,
               "vfu: %s for %s must be deleted before unplugging",
               TYPE_VFU_OBJECT, o->device);
    qdev_add_unplug_blocker(DEVICE(o->pci_dev), o->unplug_blocker);

    ret = vfu_setup_region(o->vfu_ctx, VFU_PCI_DEV_CFG_REGION_IDX,
                           pci_config_size(o->pci_dev), &vfu_object_cfg_access,
                           VFU_REGION_FLAG_RW | VFU_REGION_FLAG_ALWAYS_CB,
                           NULL, 0, -1, 0);
    if (ret < 0) {
        error_setg(errp,
                   "vfu: Failed to setup config space handlers for %s - %s",
                   o->device, strerror(errno));
        goto fail;
    }

    ret = vfu_setup_device_dma(o->vfu_ctx, &dma_register, &dma_unregister);
    if (ret < 0) {
        error_setg(errp, "vfu: Failed to setup DMA handlers for %s",
                   o->device);
        goto fail;
    }

    vfu_object_register_bars(o->vfu_ctx, o->pci_dev);

    ret = vfu_object_setup_irqs(o, o->pci_dev);
    if (ret < 0) {
        error_setg(errp, "vfu: Failed to setup interrupts for %s",
                   o->device);
        goto fail;
    }

    ret = vfu_setup_device_reset_cb(o->vfu_ctx, &vfu_object_device_reset);
    if (ret < 0) {
        error_setg(errp, "vfu: Failed to setup reset callback");
        goto fail;
    }

    ret = vfu_realize_ctx(o->vfu_ctx);
    if (ret < 0) {
        error_setg(errp, "vfu: Failed to realize device %s - %s",
                   o->device, strerror(errno));
        goto fail;
    }

    o->vfu_poll_fd = vfu_get_poll_fd(o->vfu_ctx);
    if (o->vfu_poll_fd < 0) {
        error_setg(errp, "vfu: Failed to get poll fd %s", o->device);
        goto fail;
    }

    qemu_set_fd_handler(o->vfu_poll_fd, vfu_object_attach_ctx, NULL, o);

    return;

fail:
    vfu_destroy_ctx(o->vfu_ctx);
    if (o->unplug_blocker && o->pci_dev) {
        qdev_del_unplug_blocker(DEVICE(o->pci_dev), o->unplug_blocker);
        error_free(o->unplug_blocker);
        o->unplug_blocker = NULL;
    }
    if (o->pci_dev) {
        vfu_object_restore_msi_cbs(o);
        o->pci_dev->irq_opaque = NULL;
        object_unref(OBJECT(o->pci_dev));
        o->pci_dev = NULL;
    }
    o->vfu_ctx = NULL;
}
static void vfu_object_init(Object *obj)
{
    VfuObjectClass *k = VFU_OBJECT_GET_CLASS(obj);
    VfuObject *o = VFU_OBJECT(obj);

    k->nr_devs++;

    if (!object_dynamic_cast(OBJECT(current_machine), TYPE_REMOTE_MACHINE)) {
        error_setg(&o->err, "vfu: %s only compatible with %s machine",
                   TYPE_VFU_OBJECT, TYPE_REMOTE_MACHINE);
        return;
    }

    if (!phase_check(PHASE_MACHINE_READY)) {
        o->machine_done.notify = vfu_object_machine_done;
        qemu_add_machine_init_done_notifier(&o->machine_done);
    }

    o->vfu_poll_fd = -1;
}
*obj
)
874 VfuObjectClass
*k
= VFU_OBJECT_GET_CLASS(obj
);
875 VfuObject
*o
= VFU_OBJECT(obj
);
879 qapi_free_SocketAddress(o
->socket
);
883 if (o
->vfu_poll_fd
!= -1) {
884 qemu_set_fd_handler(o
->vfu_poll_fd
, NULL
, NULL
, NULL
);
889 vfu_destroy_ctx(o
->vfu_ctx
);
897 if (o
->unplug_blocker
&& o
->pci_dev
) {
898 qdev_del_unplug_blocker(DEVICE(o
->pci_dev
), o
->unplug_blocker
);
899 error_free(o
->unplug_blocker
);
900 o
->unplug_blocker
= NULL
;
904 vfu_object_restore_msi_cbs(o
);
905 o
->pci_dev
->irq_opaque
= NULL
;
906 object_unref(OBJECT(o
->pci_dev
));
910 if (!k
->nr_devs
&& vfu_object_auto_shutdown()) {
911 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN
);
914 if (o
->machine_done
.notify
) {
915 qemu_remove_machine_init_done_notifier(&o
->machine_done
);
916 o
->machine_done
.notify
= NULL
;
static void vfu_object_class_init(ObjectClass *klass, void *data)
{
    VfuObjectClass *k = VFU_OBJECT_CLASS(klass);

    k->nr_devs = 0;

    object_class_property_add(klass, "socket", "SocketAddress", NULL,
                              vfu_object_set_socket, NULL, NULL);
    object_class_property_set_description(klass, "socket",
                                          "SocketAddress "
                                          "(ex: type=unix,path=/tmp/sock). "
                                          "Only UNIX is presently supported");
    object_class_property_add_str(klass, "device", NULL,
                                  vfu_object_set_device);
    object_class_property_set_description(klass, "device",
                                          "device ID - only PCI devices "
                                          "are presently supported");
}
static const TypeInfo vfu_object_info = {
    .name = TYPE_VFU_OBJECT,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(VfuObject),
    .instance_init = vfu_object_init,
    .instance_finalize = vfu_object_finalize,
    .class_size = sizeof(VfuObjectClass),
    .class_init = vfu_object_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};
static void vfu_register_types(void)
{
    type_register_static(&vfu_object_info);
}

type_init(vfu_register_types);