/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "kvm_ppc.h"

#include <sys/ioctl.h>
/*
 * Helpers for CPU hotplug
 *
 * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
 */
typedef struct KVMEnabledCPU {
    unsigned long vcpu_id;
    QLIST_ENTRY(KVMEnabledCPU) node;
} KVMEnabledCPU;

static QLIST_HEAD(, KVMEnabledCPU)
    kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);
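/*
 * Note: these helpers record which vCPUs already have a XIVE presenter
 * connected to the KVM device, so that a CPU that is hot unplugged and
 * then replugged is not connected twice (see kvmppc_xive_cpu_connect()
 * below).
 */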
static bool kvm_cpu_is_enabled(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
        if (enabled_cpu->vcpu_id == vcpu_id) {
            return true;
        }
    }
    return false;
}
static void kvm_cpu_enable(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    enabled_cpu = g_malloc(sizeof(*enabled_cpu));
    enabled_cpu->vcpu_id = vcpu_id;
    QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
}
static void kvm_cpu_disable_all(void)
{
    KVMEnabledCPU *enabled_cpu, *next;

    QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) {
        QLIST_REMOVE(enabled_cpu, node);
        g_free(enabled_cpu);
    }
}
/*
 * XIVE Thread Interrupt Management context (KVM)
 */

void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
    uint64_t state[2];
    int ret;

    /* The KVM XIVE device is not in use yet */
    if (xive->fd == -1) {
        return;
    }

    /* word0 and word1 of the OS ring. */
    state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);

    ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not restore KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
    }
}
void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
    uint64_t state[2] = { 0 };
    int ret;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not capture KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return;
    }

    /* word0 and word1 of the OS ring. */
    *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];
}
typedef struct {
    XiveTCTX *tctx;
    Error *err;
} XiveCpuGetState;

static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
                                                 run_on_cpu_data arg)
{
    XiveCpuGetState *s = arg.host_ptr;

    kvmppc_xive_cpu_get_state(s->tctx, &s->err);
}
void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
{
    XiveCpuGetState s = {
        .tctx = tctx,
        .err = NULL,
    };

    /*
     * Kick the vCPU to make sure it is available for the KVM ioctl.
     */
    run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
               RUN_ON_CPU_HOST_PTR(&s));

    if (s.err) {
        error_propagate(errp, s.err);
    }
}
void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
    unsigned long vcpu_id;
    int ret;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    /* Check if CPU was hot unplugged and replugged. */
    if (kvm_cpu_is_enabled(tctx->cs)) {
        return;
    }

    vcpu_id = kvm_arch_vcpu_id(tctx->cs);

    ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
                              vcpu_id, 0);
    if (ret < 0) {
        error_setg(errp, "XIVE: unable to connect CPU%ld to KVM device: %s",
                   vcpu_id, strerror(errno));
        return;
    }

    kvm_cpu_enable(tctx->cs);
}
/*
 * XIVE Interrupt Source (KVM)
 */

void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
                                   Error **errp)
{
    uint32_t end_idx;
    uint32_t end_blk;
    uint8_t priority;
    uint32_t server;
    bool masked;
    uint32_t eisn;
    uint64_t kvm_src;
    Error *local_err = NULL;

    assert(xive_eas_is_valid(eas));

    end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    eisn = xive_get_field64(EAS_END_DATA, eas->w);
    masked = xive_eas_is_masked(eas);

    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    /*
     * Pack the source configuration (priority, server, masked state and
     * EISN) into the 64-bit word expected by the KVM device.
     */
    kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT &
        KVM_XIVE_SOURCE_PRIORITY_MASK;
    kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT &
        KVM_XIVE_SOURCE_SERVER_MASK;
    kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) &
        KVM_XIVE_SOURCE_MASKED_MASK;
    kvm_src |= ((uint64_t) eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
        KVM_XIVE_SOURCE_EISN_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
                      &kvm_src, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}
void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn,
                      NULL, true, errp);
}
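/*
 * Note: the KVM_DEV_XIVE_GRP_SOURCE_SYNC attribute asks the KVM XIVE
 * device to sync the source, flushing any in-flight event notification
 * for this LISN to its event queue.
 */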
/*
 * At reset, the interrupt sources are simply created and MASKED. We
 * only need to inform the KVM XIVE device about their type: LSI or
 * MSI.
 */
int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    uint64_t state = 0;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return -ENODEV;
    }

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        state |= KVM_XIVE_LEVEL_SENSITIVE;
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            state |= KVM_XIVE_LEVEL_ASSERTED;
        }
    }

    return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state,
                             true, errp);
}
static void kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        Error *local_err = NULL;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        kvmppc_xive_source_reset_one(xsrc, i, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
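/*
 * Note: kvmppc_xive_source_reset() is called once at connect time to
 * declare all valid sources to KVM; kvmppc_xive_source_reset_one() is
 * also used individually by kvmppc_xive_post_load() before restoring a
 * source configuration.
 */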
/*
 * This is used to perform the magic loads on the ESB pages, described
 * in xive.h.
 *
 * Memory barriers should not be needed for loads (no store for now).
 */
static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) +
        offset;

    if (write) {
        *addr = cpu_to_be64(data);
        return -1;
    } else {
        /* Prevent the compiler from optimizing away the load */
        volatile uint64_t value = be64_to_cpu(*addr);
        return value;
    }
}
static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset)
{
    return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3;
}
static void xive_esb_trigger(XiveSource *xsrc, int srcno)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno);

    /* A store on the trigger page injects the interrupt */
    *addr = 0x0;
}
uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    if (write) {
        return xive_esb_rw(xsrc, srcno, offset, data, 1);
    }

    /*
     * Special Load EOI handling for LSI sources. Q bit is never set
     * and the interrupt should be re-triggered if the level is still
     * asserted.
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        offset == XIVE_ESB_LOAD_EOI) {
        xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00);
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            xive_esb_trigger(xsrc, srcno);
        }
        return 0;
    } else {
        return xive_esb_rw(xsrc, srcno, offset, 0, 0);
    }
}
static void kvmppc_xive_source_get_state(XiveSource *xsrc)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /* Perform a load without side effect to retrieve the PQ bits */
        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /* and save PQ locally */
        xive_source_esb_set(xsrc, i, pq);
    }
}
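/*
 * Note: kvmppc_xive_source_get_state() captures the PQ bits of all valid
 * sources into the XiveSource model. It is called from
 * kvmppc_xive_synchronize_state() below, and only while the VM is
 * running, so that the state saved at VM stop time is not overwritten.
 */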
void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = opaque;

    if (!xive_source_irq_is_lsi(xsrc, srcno)) {
        if (!val) {
            return;
        }
    } else {
        if (val) {
            xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    }

    xive_esb_trigger(xsrc, srcno);
}
/*
 * sPAPR XIVE interrupt controller (KVM)
 */

void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    assert(xive_end_is_valid(end));

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * The EQ index and toggle bit are updated by HW. These are the
     * only fields from KVM we want to update QEMU with. The other END
     * fields should already be in the QEMU END table.
     */
    end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);
}
void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    /*
     * Build the KVM state from the local END structure.
     */
    if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
    }

    /*
     * If the hcall is disabling the EQ, set the size and page address
     * to zero. When migrating, only valid ENDs are taken into
     * account.
     */
    if (xive_end_is_valid(end)) {
        kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
        kvm_eq.qaddr  = xive_end_qaddr(end);
        /*
         * The EQ toggle bit and index should only be relevant when
         * restoring the EQ state
         */
        kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
        kvm_eq.qindex  = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        kvm_eq.qshift = 0;
        kvm_eq.qaddr  = 0;
    }

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}
void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
                      NULL, true, errp);
}
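/*
 * Note: kvmppc_xive_reset() uses the same KVM_DEV_XIVE_GRP_CTRL group as
 * the KVM_DEV_XIVE_EQ_SYNC control issued by the VM change state handler
 * below; KVM_DEV_XIVE_RESET resets the state of the KVM XIVE device.
 */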
static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
{
    Error *local_err = NULL;
    int i;

    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
/*
 * The primary goal of the XIVE VM change handler is to mark the EQ
 * pages dirty when all XIVE event notifications have stopped.
 *
 * Whenever the VM is stopped, the VM change handler sets the source
 * PQs to PENDING to stop the flow of events and to possibly catch a
 * triggered interrupt occurring while the VM is stopped. The previous
 * state is saved in anticipation of a migration. The XIVE controller
 * is then synced through KVM to flush any in-flight event
 * notification and stabilize the EQs.
 *
 * At this stage, we can mark the EQ page dirty and let a migration
 * sequence transfer the EQ pages to the destination, which is done
 * just after the stop state.
 *
 * The previous configuration of the sources is restored when the VM
 * runs again. If an interrupt was queued while the VM was stopped,
 * simply generate a trigger.
 */
static void kvmppc_xive_change_state_handler(void *opaque, int running,
                                             RunState state)
{
    SpaprXive *xive = opaque;
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    int i;

    /*
     * Restore the sources to their initial state. This is called when
     * the VM resumes after a stop or a migration.
     */
    if (running) {
        for (i = 0; i < xsrc->nr_irqs; i++) {
            uint8_t pq;
            uint8_t old_pq;

            if (!xive_eas_is_valid(&xive->eat[i])) {
                continue;
            }

            pq = xive_source_esb_get(xsrc, i);
            old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));

            /*
             * An interrupt was queued while the VM was stopped,
             * generate a trigger.
             */
            if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
                xive_esb_trigger(xsrc, i);
            }
        }

        return;
    }

    /*
     * Mask the sources, to stop the flow of event notifications, and
     * save the PQs locally in the XiveSource object. The XiveSource
     * state will be collected later on by its vmstate handler if a
     * migration is in progress.
     */
    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /*
         * PQ is set to PENDING to possibly catch a triggered
         * interrupt occurring while the VM is stopped (hotplug event
         * for instance).
         */
        if (pq != XIVE_ESB_OFF) {
            pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
        }
        xive_source_esb_set(xsrc, i, pq);
    }

    /*
     * Sync the XIVE controller in KVM, to flush in-flight event
     * notification that should be enqueued in the EQs and mark the
     * XIVE EQ pages dirty to collect all updates.
     */
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                      KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return;
    }
}
void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
{
    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    /*
     * When the VM is stopped, the sources are masked and the previous
     * state is saved in anticipation of a migration. We should not
     * synchronize the source state in that case else we will override
     * the saved state.
     */
    if (runstate_is_running()) {
        kvmppc_xive_source_get_state(&xive->source);
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, errp);
}
/*
 * The SpaprXive 'pre_save' method is called by the vmstate handler of
 * the SpaprXive model, after the XIVE controller is synced in the VM
 * change handler.
 */
int kvmppc_xive_pre_save(SpaprXive *xive)
{
    Error *local_err = NULL;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return 0;
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -1;
    }

    return 0;
}
/*
 * The SpaprXive 'post_load' method is not called by a vmstate
 * handler. It is called at the sPAPR machine level at the end of the
 * migration sequence by the sPAPR IRQ backend 'post_load' method,
 * when all XIVE states have been transferred and loaded.
 */
int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
{
    Error *local_err = NULL;
    CPUState *cs;
    int i;

    /* The KVM XIVE device should be in use */
    assert(xive->fd != -1);

    /* Restore the ENDT first. The targeting depends on it. */
    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* Restore the EAT */
    for (i = 0; i < xive->nr_irqs; i++) {
        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /*
         * We can only restore the source config if the source has been
         * previously set in KVM. Since we don't do that for all interrupts
         * at reset time anymore, let's do it now.
         */
        kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }

        kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /*
     * Restore the thread interrupt contexts of initial CPUs.
     *
     * The context of hotplugged CPUs is restored later, by the
     * 'post_load' handler of the XiveTCTX model because they are not
     * available at the time the SpaprXive 'post_load' method is
     * called. We cannot restore the context of all CPUs in the
     * 'post_load' handler of XiveTCTX because the machine is not
     * necessarily connected to the KVM device at that time.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* The source states will be restored when the machine starts running */

    return 0;
}
static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
                              Error **errp)
{
    void *addr;
    uint32_t page_shift = 16; /* TODO: fix page_shift */

    addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
                pgoff << page_shift);
    if (addr == MAP_FAILED) {
        error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
        return NULL;
    }

    return addr;
}
/*
 * All the XIVE memory regions are now backed by mappings from the KVM
 * XIVE device.
 */
int kvmppc_xive_connect(SpaprInterruptController *intc, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    size_t esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
    size_t tima_len = 4ull << TM_SHIFT;
    CPUState *cs;

    /*
     * The KVM XIVE device is already in use. This is the case when
     * rebooting under the XIVE-only interrupt mode.
     */
    if (xive->fd != -1) {
        return 0;
    }

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return -1;
    }

    /* First, create the KVM XIVE device */
    xive->fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
    if (xive->fd < 0) {
        error_setg_errno(errp, -xive->fd, "XIVE: error creating KVM device");
        return -1;
    }

    /*
     * 1. Source ESB pages - KVM mapping
     */
    xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len,
                                      &local_err);
    if (local_err) {
        goto fail;
    }

    memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
                                      "xive.esb", esb_len, xsrc->esb_mmap);
    memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
                                        &xsrc->esb_mmio_kvm, 1);

    /*
     * 2. END ESB pages (No KVM support yet)
     */

    /*
     * 3. TIMA pages - KVM mapping
     */
    xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len,
                                     &local_err);
    if (local_err) {
        goto fail;
    }
    memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
                                      "xive.tima", tima_len, xive->tm_mmap);
    memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
                                        &xive->tm_mmio_kvm, 1);

    xive->change = qemu_add_vm_change_state_handler(
        kvmppc_xive_change_state_handler, xive);

    /* Connect the presenters to the initial VCPUs of the machine */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            goto fail;
        }
    }

    /* Update the KVM sources */
    kvmppc_xive_source_reset(xsrc, &local_err);
    if (local_err) {
        goto fail;
    }

    kvm_kernel_irqchip = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_direct_mapping = true;
    return 0;

fail:
    error_propagate(errp, local_err);
    kvmppc_xive_disconnect(intc);
    return -1;
}
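/*
 * Note: kvmppc_xive_disconnect() below undoes the KVM mappings set up by
 * kvmppc_xive_connect() and is also used on its failure path.
 */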
void kvmppc_xive_disconnect(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc;
    size_t esb_len;

    /* The KVM XIVE device is not in use */
    if (!xive || xive->fd == -1) {
        return;
    }

    /* Clear the KVM mapping */
    xsrc = &xive->source;
    esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;

    if (xsrc->esb_mmap) {
        memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
        object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
        munmap(xsrc->esb_mmap, esb_len);
        xsrc->esb_mmap = NULL;
    }

    if (xive->tm_mmap) {
        memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
        object_unparent(OBJECT(&xive->tm_mmio_kvm));
        munmap(xive->tm_mmap, 4ull << TM_SHIFT);
        xive->tm_mmap = NULL;
    }

    /*
     * When the KVM device fd is closed, the KVM device is destroyed
     * and removed from the list of devices of the VM. The VCPU
     * presenters are also detached from the device.
     */
    if (xive->fd != -1) {
        close(xive->fd);
        xive->fd = -1;
    }

    kvm_kernel_irqchip = false;
    kvm_msi_via_irqfd_allowed = false;
    kvm_gsi_direct_mapping = false;

    /* Clear the local list of presenter (hotplug) */
    kvm_cpu_disable_all();

    /* VM Change state handler is not needed anymore */
    if (xive->change) {
        qemu_del_vm_change_state_handler(xive->change);
        xive->change = NULL;
    }
}