/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "kvm_ppc.h"

#include <sys/ioctl.h>
/*
 * Helpers for CPU hotplug
 *
 * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
 */
typedef struct KVMEnabledCPU {
    unsigned long vcpu_id;
    QLIST_ENTRY(KVMEnabledCPU) node;
} KVMEnabledCPU;

static QLIST_HEAD(, KVMEnabledCPU)
    kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);
static bool kvm_cpu_is_enabled(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
        if (enabled_cpu->vcpu_id == vcpu_id) {
            return true;
        }
    }
    return false;
}
static void kvm_cpu_enable(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    enabled_cpu = g_malloc(sizeof(*enabled_cpu));
    enabled_cpu->vcpu_id = vcpu_id;
    QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
}
static void kvm_cpu_disable_all(void)
{
    KVMEnabledCPU *enabled_cpu, *next;

    QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) {
        QLIST_REMOVE(enabled_cpu, node);
        g_free(enabled_cpu);
    }
}
/*
 * XIVE Thread Interrupt Management context (KVM)
 */
static void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
{
    uint64_t state[2] = { 0 };
    int ret;

    /* word0 and word1 of the OS ring. */
    state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);

    ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not restore KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
    }
}
void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
    uint64_t state[2] = { 0 };
    int ret;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not capture KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return;
    }

    /* word0 and word1 of the OS ring. */
    *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];
}
typedef struct {
    XiveTCTX *tctx;
    Error *err;
} XiveCpuGetState;

static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
                                                 run_on_cpu_data arg)
{
    XiveCpuGetState *s = arg.host_ptr;

    kvmppc_xive_cpu_get_state(s->tctx, &s->err);
}
void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
{
    XiveCpuGetState s = {
        .tctx = tctx,
        .err = NULL,
    };

    /*
     * Kick the vCPU to make sure it is available for the KVM ioctl.
     */
    run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
               RUN_ON_CPU_HOST_PTR(&s));

    if (s.err) {
        error_propagate(errp, s.err);
    }
}
void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
    unsigned long vcpu_id;
    int ret;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    /* Check if CPU was hot unplugged and replugged. */
    if (kvm_cpu_is_enabled(tctx->cs)) {
        return;
    }

    vcpu_id = kvm_arch_vcpu_id(tctx->cs);

    ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
                              vcpu_id);
    if (ret < 0) {
        error_setg(errp, "XIVE: unable to connect CPU%ld to KVM device: %s",
                   vcpu_id, strerror(errno));
        return;
    }

    kvm_cpu_enable(tctx->cs);
}
/*
 * XIVE Interrupt Source (KVM)
 */
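/*
 * Push the routing of a source, described by its XiveEAS entry, down
 * to the KVM XIVE device: the target END is converted back into a
 * (server, priority) pair and packed, together with the EISN and the
 * masked bit, into the 64-bit attribute written through the
 * KVM_DEV_XIVE_GRP_SOURCE_CONFIG group.
 */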
void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
                                   Error **errp)
{
    uint32_t end_idx;
    uint32_t end_blk;
    uint8_t priority;
    uint32_t server;
    bool masked;
    uint32_t eisn;
    uint64_t kvm_src;
    Error *local_err = NULL;

    assert(xive_eas_is_valid(eas));

    end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    eisn = xive_get_field64(EAS_END_DATA, eas->w);
    masked = xive_eas_is_masked(eas);

    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT &
        KVM_XIVE_SOURCE_PRIORITY_MASK;
    kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT &
        KVM_XIVE_SOURCE_SERVER_MASK;
    kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) &
        KVM_XIVE_SOURCE_MASKED_MASK;
    kvm_src |= ((uint64_t) eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
        KVM_XIVE_SOURCE_EISN_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
                      &kvm_src, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}
void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn,
                      NULL, true, errp);
}
/*
 * At reset, the interrupt sources are simply created and MASKED. We
 * only need to inform the KVM XIVE device about their type: LSI or
 * MSI.
 */
void kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    uint64_t state = 0;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        state |= KVM_XIVE_LEVEL_SENSITIVE;
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            state |= KVM_XIVE_LEVEL_ASSERTED;
        }
    }

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state,
                      true, errp);
}
static void kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        Error *local_err = NULL;

        kvmppc_xive_source_reset_one(xsrc, i, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
/*
 * This is used to perform the magic loads on the ESB pages, described
 * in xive.h.
 *
 * Memory barriers should not be needed for loads (no store for now).
 */
static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) +
        offset;

    if (write) {
        *addr = cpu_to_be64(data);
        return -1;
    } else {
        /* Prevent the compiler from optimizing away the load */
        volatile uint64_t value = be64_to_cpu(*addr);
        return value;
    }
}
static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset)
{
    return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3;
}
static void xive_esb_trigger(XiveSource *xsrc, int srcno)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno);

    /* A store to the trigger page generates an interrupt trigger */
    *addr = 0x0;
}
uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    if (write) {
        return xive_esb_rw(xsrc, srcno, offset, data, 1);
    }

    /*
     * Special Load EOI handling for LSI sources. Q bit is never set
     * and the interrupt should be re-triggered if the level is still
     * asserted.
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        offset == XIVE_ESB_LOAD_EOI) {
        xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00);
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            xive_esb_trigger(xsrc, srcno);
        }
        return 0;
    } else {
        return xive_esb_rw(xsrc, srcno, offset, 0, 0);
    }
}
static void kvmppc_xive_source_get_state(XiveSource *xsrc)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        /* Perform a load without side effect to retrieve the PQ bits */
        uint8_t pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /* and save PQ locally */
        xive_source_esb_set(xsrc, i, pq);
    }
}
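/*
 * qemu_irq handler for the KVM sources: interrupts are injected with
 * the KVM_IRQ_LINE ioctl. MSIs are edge triggered only, while LSIs
 * also record their assertion level in xsrc->status, which is used to
 * re-trigger the source on EOI and to restore it at reset.
 */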
void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = opaque;
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    struct kvm_irq_level args;
    int rc;

    /* The KVM XIVE device should be in use */
    assert(xive->fd != -1);

    args.irq = srcno;
    if (!xive_source_irq_is_lsi(xsrc, srcno)) {
        if (!val) {
            return;
        }
        args.level = KVM_INTERRUPT_SET;
    } else {
        if (val) {
            xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
            args.level = KVM_INTERRUPT_SET_LEVEL;
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
            args.level = KVM_INTERRUPT_UNSET;
        }
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_IRQ_LINE, &args);
    if (rc < 0) {
        error_report("XIVE: kvm_irq_line() failed : %s", strerror(errno));
    }
}
/*
 * sPAPR XIVE interrupt controller (KVM)
 */
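/*
 * The KVM XIVE device identifies an Event Queue by a (server,
 * priority) pair packed into a 64-bit EQ index, whereas QEMU names an
 * END by its (block, index) coordinates. The helpers below convert
 * between the two representations when reading and writing the EQ
 * configuration through KVM_DEV_XIVE_GRP_EQ_CONFIG.
 */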
void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    assert(xive_end_is_valid(end));

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * The EQ index and toggle bit are updated by HW. These are the
     * only fields from KVM we want to update QEMU with. The other END
     * fields should already be in the QEMU END table.
     */
    end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);
}
void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    /*
     * Build the KVM state from the local END structure.
     */
    if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
    }

    /*
     * If the hcall is disabling the EQ, set the size and page address
     * to zero. When migrating, only valid ENDs are taken into
     * account.
     */
    if (xive_end_is_valid(end)) {
        kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
        kvm_eq.qaddr  = xive_end_qaddr(end);
        /*
         * The EQ toggle bit and index should only be relevant when
         * restoring the EQ state
         */
        kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
        kvm_eq.qindex  = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        kvm_eq.qshift = 0;
        kvm_eq.qaddr  = 0;
    }

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}
void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
                      NULL, true, errp);
}
static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
{
    Error *local_err = NULL;
    int i;

    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
/*
 * The primary goal of the XIVE VM change handler is to mark the EQ
 * pages dirty when all XIVE event notifications have stopped.
 *
 * Whenever the VM is stopped, the VM change handler sets the source
 * PQs to PENDING to stop the flow of events and to possibly catch a
 * triggered interrupt occurring while the VM is stopped. The previous
 * state is saved in anticipation of a migration. The XIVE controller
 * is then synced through KVM to flush any in-flight event
 * notification and stabilize the EQs.
 *
 * At this stage, we can mark the EQ pages dirty and let a migration
 * sequence transfer the EQ pages to the destination, which is done
 * just after the stop state.
 *
 * The previous configuration of the sources is restored when the VM
 * runs again. If an interrupt was queued while the VM was stopped,
 * simply generate a trigger.
 */
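/*
 * Reminder on the ESB PQ states used below: XIVE_ESB_RESET (PQ=00,
 * source enabled), XIVE_ESB_PENDING (PQ=10, an event was presented),
 * XIVE_ESB_QUEUED (PQ=11, another event arrived while pending) and
 * XIVE_ESB_OFF (PQ=01, source masked). The XIVE_ESB_SET_PQ_* loads
 * set the new PQ state and return the previous one, while
 * XIVE_ESB_GET reads the PQ bits without side effect.
 */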
static void kvmppc_xive_change_state_handler(void *opaque, int running,
                                             RunState state)
{
    SpaprXive *xive = opaque;
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    int i;

    /*
     * Restore the sources to their initial state. This is called when
     * the VM resumes after a stop or a migration.
     */
    if (running) {
        for (i = 0; i < xsrc->nr_irqs; i++) {
            uint8_t pq = xive_source_esb_get(xsrc, i);
            uint8_t old_pq;

            old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));

            /*
             * An interrupt was queued while the VM was stopped,
             * generate a trigger.
             */
            if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
                xive_esb_trigger(xsrc, i);
            }
        }

        return;
    }

    /*
     * Mask the sources, to stop the flow of event notifications, and
     * save the PQs locally in the XiveSource object. The XiveSource
     * state will be collected later on by its vmstate handler if a
     * migration is in progress.
     */
    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /*
         * PQ is set to PENDING to possibly catch a triggered
         * interrupt occurring while the VM is stopped (hotplug event
         * for instance).
         */
        if (pq != XIVE_ESB_OFF) {
            pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
        }
        xive_source_esb_set(xsrc, i, pq);
    }

    /*
     * Sync the XIVE controller in KVM, to flush in-flight event
     * notification that should be enqueued in the EQs and mark the
     * XIVE EQ pages dirty to collect all updates.
     */
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                      KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}
void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
{
    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    /*
     * When the VM is stopped, the sources are masked and the previous
     * state is saved in anticipation of a migration. We should not
     * synchronize the source state in that case else we will override
     * the saved state.
     */
    if (runstate_is_running()) {
        kvmppc_xive_source_get_state(&xive->source);
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, errp);
}
/*
 * The SpaprXive 'pre_save' method is called by the vmstate handler of
 * the SpaprXive model, after the XIVE controller is synced in the VM
 * change handler.
 */
int kvmppc_xive_pre_save(SpaprXive *xive)
{
    Error *local_err = NULL;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return 0;
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -1;
    }

    return 0;
}
/*
 * The SpaprXive 'post_load' method is not called by a vmstate
 * handler. It is called at the sPAPR machine level at the end of the
 * migration sequence by the sPAPR IRQ backend 'post_load' method,
 * when all XIVE states have been transferred and loaded.
 */
int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
{
    Error *local_err = NULL;
    CPUState *cs;
    int i;

    /* The KVM XIVE device should be in use */
    assert(xive->fd != -1);

    /* Restore the ENDT first. The targeting depends on it. */
    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* Restore the EAT */
    for (i = 0; i < xive->nr_irqs; i++) {
        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* Restore the thread interrupt contexts */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* The source states will be restored when the machine starts running */
    return 0;
}
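/*
 * The ESB and TIMA pages of the KVM XIVE device are exposed to QEMU
 * through mmap() on the device fd, at fixed page offsets
 * (KVM_XIVE_ESB_PAGE_OFFSET and KVM_XIVE_TIMA_PAGE_OFFSET). The
 * helper below wraps this mapping and reports errors through errp.
 */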
static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
                              Error **errp)
{
    void *addr;
    uint32_t page_shift = 16; /* TODO: fix page_shift */

    addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
                pgoff << page_shift);
    if (addr == MAP_FAILED) {
        error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
        return NULL;
    }

    return addr;
}
/*
 * All the XIVE memory regions are now backed by mappings from the KVM
 * XIVE device.
 */
void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
{
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    size_t esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
    size_t tima_len = 4ull << TM_SHIFT;
    CPUState *cs;

    /*
     * The KVM XIVE device already in use. This is the case when
     * rebooting under the XIVE-only interrupt mode.
     */
    if (xive->fd != -1) {
        return;
    }

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return;
    }

    /* First, create the KVM XIVE device */
    xive->fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
    if (xive->fd < 0) {
        error_setg_errno(errp, -xive->fd, "XIVE: error creating KVM device");
        return;
    }

    /*
     * 1. Source ESB pages - KVM mapping
     */
    xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len,
                                      &local_err);
    if (local_err) {
        goto fail;
    }

    memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
                                      "xive.esb", esb_len, xsrc->esb_mmap);
    memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
                                        &xsrc->esb_mmio_kvm, 1);

    /*
     * 2. END ESB pages (No KVM support yet)
     */

    /*
     * 3. TIMA pages - KVM mapping
     */
    xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len,
                                     &local_err);
    if (local_err) {
        goto fail;
    }
    memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
                                      "xive.tima", tima_len, xive->tm_mmap);
    memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
                                        &xive->tm_mmio_kvm, 1);

    xive->change = qemu_add_vm_change_state_handler(
        kvmppc_xive_change_state_handler, xive);

    /* Connect the presenters to the initial VCPUs of the machine */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            goto fail;
        }
    }

    /* Update the KVM sources */
    kvmppc_xive_source_reset(xsrc, &local_err);
    if (local_err) {
        goto fail;
    }

    kvm_kernel_irqchip = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_direct_mapping = true;
    return;

fail:
    error_propagate(errp, local_err);
    kvmppc_xive_disconnect(xive, NULL);
}
void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp)
{
    XiveSource *xsrc;
    size_t esb_len;

    /* The KVM XIVE device is not in use */
    if (!xive || xive->fd == -1) {
        return;
    }

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return;
    }

    /* Clear the KVM mapping */
    xsrc = &xive->source;
    esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;

    if (xsrc->esb_mmap) {
        memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
        object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
        munmap(xsrc->esb_mmap, esb_len);
        xsrc->esb_mmap = NULL;
    }

    if (xive->tm_mmap) {
        memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
        object_unparent(OBJECT(&xive->tm_mmio_kvm));
        munmap(xive->tm_mmap, 4ull << TM_SHIFT);
        xive->tm_mmap = NULL;
    }

    /*
     * When the KVM device fd is closed, the KVM device is destroyed
     * and removed from the list of devices of the VM. The VCPU
     * presenters are also detached from the device.
     */
    if (xive->fd != -1) {
        close(xive->fd);
        xive->fd = -1;
    }

    kvm_kernel_irqchip = false;
    kvm_msi_via_irqfd_allowed = false;
    kvm_gsi_direct_mapping = false;

    /* Clear the local list of presenters (hotplug) */
    kvm_cpu_disable_all();

    /* VM Change state handler is not needed anymore */
    qemu_del_vm_change_state_handler(xive->change);
}