/*
 * QEMU PowerPC sPAPR IRQ interface
 *
 * Copyright (c) 2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
10 #include "qemu/osdep.h"
12 #include "qemu/error-report.h"
13 #include "qapi/error.h"
15 #include "hw/ppc/spapr.h"
16 #include "hw/ppc/spapr_cpu_core.h"
17 #include "hw/ppc/spapr_xive.h"
18 #include "hw/ppc/xics.h"
19 #include "hw/ppc/xics_spapr.h"
20 #include "hw/qdev-properties.h"
21 #include "cpu-models.h"
22 #include "sysemu/kvm.h"
26 void spapr_irq_msi_init(SpaprMachineState
*spapr
, uint32_t nr_msis
)
28 spapr
->irq_map_nr
= nr_msis
;
29 spapr
->irq_map
= bitmap_new(spapr
->irq_map_nr
);
/*
 * Allocate a contiguous block of 'num' MSI IRQs from the allocator
 * bitmap, optionally aligned on 'num' (required for multi-vector MSI).
 * Returns the first global IRQ number of the block, or -1 (with *errp
 * set) if no free block of that size exists.
 */
int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
                        Error **errp)
{
    int irq;

    /*
     * The 'align_mask' parameter of bitmap_find_next_zero_area()
     * should be one less than a power of 2; 0 means no
     * alignment. Adapt the 'align' value of the former allocator
     * to fit the requirements of bitmap_find_next_zero_area()
     */
    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
                                     align ? num - 1 : 0);
    if (irq == spapr->irq_map_nr) {
        /* bitmap_find_next_zero_area() returns the size on failure */
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    bitmap_set(spapr->irq_map, irq, num);

    /* Bitmap index 0 maps to global IRQ number SPAPR_IRQ_MSI */
    return irq + SPAPR_IRQ_MSI;
}
57 void spapr_irq_msi_free(SpaprMachineState
*spapr
, int irq
, uint32_t num
)
59 bitmap_clear(spapr
->irq_map
, irq
- SPAPR_IRQ_MSI
, num
);
/*
 * Try to enable the in-kernel (KVM) variant of the 'irq' backend.
 * With kernel-irqchip=on a failure is propagated to the caller; with
 * the default "allowed" policy we warn and fall back to the emulated
 * backend instead.
 */
static void spapr_irq_init_kvm(SpaprMachineState *spapr,
                               SpaprIrq *irq, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        irq->init_kvm(spapr, &local_err);
        if (local_err && machine_kernel_irqchip_required(machine)) {
            error_prepend(&local_err,
                          "kernel_irqchip requested but unavailable: ");
            error_propagate(errp, local_err);
            return;
        }

        if (local_err) {
            /*
             * We failed to initialize the KVM device, fallback to
             * emulated mode
             */
            error_prepend(&local_err, "kernel_irqchip allowed but unavailable: ");
            error_append_hint(&local_err, "Falling back to kernel-irqchip=off\n");
            warn_report_err(local_err);
        }
    }
}
/*
 * Create and realize the emulated XICS interrupt source (ICS) object
 * and hang it off the machine. The ICS is sized from the backend's
 * nr_xirqs. On failure, *errp is set and spapr->ics is left unset.
 */
static void spapr_irq_init_xics(SpaprMachineState *spapr, Error **errp)
{
    Object *obj;
    Error *local_err = NULL;

    obj = object_new(TYPE_ICS_SPAPR);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    /* The ICS needs a back-link to the machine acting as XICS fabric */
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_fatal);
    object_property_set_int(obj, spapr->irq->nr_xirqs,
                            "nr-irqs", &error_fatal);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr->ics = ICS_SPAPR(obj);
}
/*
 * Claim global IRQ number 'irq' in the XICS source, configuring it as
 * level (lsi) or edge triggered. Returns 0 on success, -1 with *errp
 * set if the IRQ is out of range or already in use.
 */
static int spapr_irq_claim_xics(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    ICSState *ics = spapr->ics;

    assert(ics);

    if (!ics_valid_irq(ics, irq)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }

    /* ICS source numbers are offset from the global IRQ numbers */
    if (!ics_irq_free(ics, irq - ics->offset)) {
        error_setg(errp, "IRQ %d is not free", irq);
        return -1;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    return 0;
}
/*
 * Release a block of 'num' IRQs starting at 'irq' back to the XICS
 * source, clearing each source state. Freeing an already-free source
 * is only traced, not treated as an error.
 */
static void spapr_irq_free_xics(SpaprMachineState *spapr, int irq, int num)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;
    int i;

    if (ics_valid_irq(ics, irq)) {
        trace_spapr_irq_free(0, irq, num);
        for (i = srcno; i < srcno + num; ++i) {
            if (ics_irq_free(ics, i)) {
                /* Double free: warn through tracing but keep going */
                trace_spapr_irq_free_warn(0, i);
            }
            memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
        }
    }
}
/*
 * HMP "info pic" helper for the XICS backend: dump each vCPU's
 * interrupt presenter (ICP) state, then the shared source (ICS) state.
 */
static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
    }

    ics_pic_print_info(spapr->ics, mon);
}
/*
 * Create the per-vCPU XICS interrupt presenter (ICP) and store it in
 * the vCPU's machine-specific state. Called at vCPU creation/hotplug.
 */
static void spapr_irq_cpu_intc_create_xics(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr),
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->icp = ICP(obj);
}
/*
 * Post-migration hook for the XICS backend. With the emulated irqchip,
 * re-deliver any interrupt that was pending in a vCPU's presenter at
 * migration time; the in-kernel irqchip restores this itself.
 */
static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
{
    if (!kvm_irqchip_in_kernel()) {
        CPUState *cs;

        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);

            icp_resend(spapr_cpu_state(cpu)->icp);
        }
    }
    return 0;
}
/*
 * qemu_irq handler for the XICS backend: translate the global IRQ
 * number to an ICS source number and set its level.
 */
static void spapr_irq_set_irq_xics(void *opaque, int irq, int val)
{
    SpaprMachineState *spapr = opaque;
    uint32_t srcno = irq - spapr->ics->offset;

    ics_set_irq(spapr->ics, srcno, val);
}
/*
 * Machine reset hook for the XICS backend: (re)connect the in-kernel
 * XICS device if the KVM irqchip is enabled/allowed.
 */
static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Connect the machine to the in-kernel XICS device (KVM only). */
static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        xics_kvm_connect(spapr, errp);
    }
}
/*
 * XICS-only IRQ backend descriptor: the legacy (pre-POWER9) interrupt
 * controller model, advertised as such to the guest through OV5.
 */
SpaprIrq spapr_irq_xics = {
    .nr_xirqs    = SPAPR_NR_XIRQS,        /* external (device) IRQs */
    .nr_msis     = SPAPR_NR_MSIS,         /* MSIs carved out of the above */
    .ov5         = SPAPR_OV5_XIVE_LEGACY, /* CAS: XICS only */

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .init_kvm    = spapr_irq_init_kvm_xics,
};
/*
 * Create and realize the emulated XIVE interrupt controller device,
 * sized for all external IRQs plus the per-vCPU IPIs, then claim the
 * IPI range and install the XIVE hypercalls.
 */
static void spapr_irq_init_xive(SpaprMachineState *spapr, Error **errp)
{
    uint32_t nr_servers = spapr_max_server_number(spapr);
    DeviceState *dev;
    int i;

    dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
    /* IRQ space covers the IPIs below SPAPR_XIRQ_BASE plus the xirqs */
    qdev_prop_set_uint32(dev, "nr-irqs",
                         spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
    /*
     * 8 XIVE END structures per CPU. One for each available priority
     */
    qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
    qdev_init_nofail(dev);

    spapr->xive = SPAPR_XIVE(dev);

    /* Enable the CPU IPIs */
    for (i = 0; i < nr_servers; ++i) {
        spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
    }

    spapr_xive_hcall_init(spapr);
}
/*
 * Claim global IRQ 'irq' in the XIVE source. Returns 0 on success,
 * -1 with *errp set when the IRQ number is out of range.
 */
static int spapr_irq_claim_xive(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }
    return 0;
}
/* Release a block of 'num' IRQs starting at 'irq' in the XIVE source. */
static void spapr_irq_free_xive(SpaprMachineState *spapr, int irq, int num)
{
    int i;

    for (i = irq; i < irq + num; ++i) {
        spapr_xive_irq_free(spapr->xive, i);
    }
}
/*
 * HMP "info pic" helper for the XIVE backend: dump each vCPU's thread
 * interrupt context, then the XIVE controller state.
 */
static void spapr_irq_print_info_xive(SpaprMachineState *spapr,
                                      Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
    }

    spapr_xive_pic_print_info(spapr->xive, mon);
}
/*
 * Create the per-vCPU XIVE thread interrupt context (TCTX) and store
 * it in the vCPU's machine-specific state.
 */
static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);

    /*
     * (TCG) Early setting the OS CAM line for hotplugged CPUs as they
     * don't beneficiate from the reset of the XIVE IRQ backend
     */
    spapr_xive_set_tctx_os_cam(spapr_cpu->tctx);
}
/* Post-migration hook: delegate state fixup to the XIVE device model. */
static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return spapr_xive_post_load(spapr->xive, version_id);
}
/*
 * Machine reset hook for the XIVE backend: program each vCPU's OS CAM
 * line (TCG), try to (re)connect the in-kernel XIVE device, and enable
 * the XIVE MMIO regions.
 */
static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
{
    CPUState *cs;
    Error *local_err = NULL;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
    }

    spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(spapr->xive, true);
}
/*
 * qemu_irq handler for the XIVE backend: forward the line change to
 * either the in-kernel or the emulated XIVE source.
 */
static void spapr_irq_set_irq_xive(void *opaque, int irq, int val)
{
    SpaprMachineState *spapr = opaque;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_source_set_irq(&spapr->xive->source, irq, val);
    } else {
        xive_source_set_irq(&spapr->xive->source, irq, val);
    }
}
/* Connect the XIVE device to its in-kernel counterpart (KVM only). */
static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        kvmppc_xive_connect(spapr->xive, errp);
    }
}
/*
 * XIVE-only IRQ backend descriptor: the POWER9 "exploitation mode"
 * interrupt controller, advertised as such to the guest through OV5.
 */
SpaprIrq spapr_irq_xive = {
    .nr_xirqs    = SPAPR_NR_XIRQS,         /* external (device) IRQs */
    .nr_msis     = SPAPR_NR_MSIS,          /* MSIs carved out of the above */
    .ov5         = SPAPR_OV5_XIVE_EXPLOIT, /* CAS: XIVE only */

    .init        = spapr_irq_init_xive,
    .claim       = spapr_irq_claim_xive,
    .free        = spapr_irq_free_xive,
    .print_info  = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load   = spapr_irq_post_load_xive,
    .reset       = spapr_irq_reset_xive,
    .set_irq     = spapr_irq_set_irq_xive,
    .init_kvm    = spapr_irq_init_kvm_xive,
};
/*
 * Dual XIVE and XICS IRQ backend.
 *
 * Both interrupt mode, XIVE and XICS, objects are created but the
 * machine starts in legacy interrupt mode (XICS). It can be changed
 * by the CAS negotiation process and, in that case, the new mode is
 * activated after an extra machine reset.
 */

/*
 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 * default.
 */
static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
        &spapr_irq_xive : &spapr_irq_xics;
}
/*
 * Dual-mode init: create both the XICS and the XIVE controllers so
 * either can be activated after CAS negotiation.
 */
static void spapr_irq_init_dual(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xics.init(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xive.init(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}
/*
 * Dual-mode claim: an IRQ must be claimed in both backends so that it
 * is usable whichever mode the guest ends up negotiating.
 */
static int spapr_irq_claim_dual(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    Error *local_err = NULL;
    int ret;

    ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    return ret;
}
440 static void spapr_irq_free_dual(SpaprMachineState
*spapr
, int irq
, int num
)
442 spapr_irq_xics
.free(spapr
, irq
, num
);
443 spapr_irq_xive
.free(spapr
, irq
, num
);
446 static void spapr_irq_print_info_dual(SpaprMachineState
*spapr
, Monitor
*mon
)
448 spapr_irq_current(spapr
)->print_info(spapr
, mon
);
451 static void spapr_irq_dt_populate_dual(SpaprMachineState
*spapr
,
452 uint32_t nr_servers
, void *fdt
,
455 spapr_irq_current(spapr
)->dt_populate(spapr
, nr_servers
, fdt
, phandle
);
/*
 * Dual-mode per-vCPU presenter creation: each vCPU gets both a XIVE
 * TCTX and a XICS ICP so either mode can be activated later.
 */
static void spapr_irq_cpu_intc_create_dual(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
}
/*
 * Dual-mode post-migration hook: re-activate XIVE if it was the
 * negotiated mode on the source, then run the current backend's own
 * post_load.
 */
static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        if (kvm_irqchip_in_kernel()) {
            /* Drop the in-kernel XICS device before switching to XIVE */
            xics_kvm_disconnect(spapr, &error_fatal);
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}
/*
 * Dual-mode machine reset: tear down any in-kernel irqchip devices and
 * disable the XIVE MMIOs, then let the backend negotiated by CAS
 * re-initialize itself.
 */
static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    /* Destroy all KVM devices */
    if (kvm_irqchip_in_kernel()) {
        xics_kvm_disconnect(spapr, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XICS disconnect failed: ");
            return;
        }
        kvmppc_xive_disconnect(spapr->xive, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XIVE disconnect failed: ");
            return;
        }
    }

    spapr_irq_current(spapr)->reset(spapr, errp);
}
517 static void spapr_irq_set_irq_dual(void *opaque
, int irq
, int val
)
519 SpaprMachineState
*spapr
= opaque
;
521 spapr_irq_current(spapr
)->set_irq(spapr
, irq
, val
);
/*
 * Define values in sync with the XIVE and XICS backend
 */
SpaprIrq spapr_irq_dual = {
    .nr_xirqs    = SPAPR_NR_XIRQS,      /* external (device) IRQs */
    .nr_msis     = SPAPR_NR_MSIS,       /* MSIs carved out of the above */
    .ov5         = SPAPR_OV5_XIVE_BOTH, /* CAS: guest picks XICS or XIVE */

    .init        = spapr_irq_init_dual,
    .claim       = spapr_irq_claim_dual,
    .free        = spapr_irq_free_dual,
    .print_info  = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load   = spapr_irq_post_load_dual,
    .reset       = spapr_irq_reset_dual,
    .set_irq     = spapr_irq_set_irq_dual,
    .init_kvm    = NULL, /* should not be used */
};
/*
 * Validate the selected interrupt mode against the CPU model and the
 * host KVM capabilities, downgrading 'dual' to XICS on pre-POWER9
 * machines and rejecting impossible combinations.
 */
static void spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return;
        }

        /*
         * Non-P9 machines using only XIVE is a bogus setup. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup : DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure in
         * QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect that early to avoid QEMU to exit later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return;
    }
}
/*
 * sPAPR IRQ frontend routines for devices
 */

/*
 * Machine-level IRQ setup: validate the configuration, create the MSI
 * allocator (unless the machine uses the legacy XICS allocator), build
 * the selected backend's controller(s) and allocate the qemu_irq array
 * devices will wire to.
 */
void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    spapr_irq_check(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    spapr->irq->init(spapr, errp);

    /* One qemu_irq per possible source, IPIs included */
    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
}
635 int spapr_irq_claim(SpaprMachineState
*spapr
, int irq
, bool lsi
, Error
**errp
)
637 return spapr
->irq
->claim(spapr
, irq
, lsi
, errp
);
640 void spapr_irq_free(SpaprMachineState
*spapr
, int irq
, int num
)
642 spapr
->irq
->free(spapr
, irq
, num
);
/*
 * Look up the qemu_irq for a global IRQ number, with sanity checks
 * that the IRQ is an external one and is known to the controller(s)
 * the machine actually created.
 */
qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    /*
     * This interface is basically for VIO and PHB devices to find the
     * right qemu_irq to manipulate, so we only allow access to the
     * external irqs for now. Currently anything which needs to
     * access the IPIs most naturally gets there via the guest side
     * interfaces, we can change this if we need to in future.
     */
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    if (spapr->ics) {
        assert(ics_valid_irq(spapr->ics, irq));
    }
    if (spapr->xive) {
        assert(irq < spapr->xive->nr_irqs);
        /* The EAS must have been set up by a prior claim */
        assert(xive_eas_is_valid(&spapr->xive->eat[irq]));
    }

    return spapr->qirqs[irq];
}
668 int spapr_irq_post_load(SpaprMachineState
*spapr
, int version_id
)
670 return spapr
->irq
->post_load(spapr
, version_id
);
/*
 * Machine reset entry point for the IRQ subsystem: checks the MSI
 * allocator is drained (all MSIs must have been freed by device reset)
 * and runs the backend's optional reset hook.
 */
void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));

    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}
/*
 * Fetch the phandle of the "interrupt-controller" node from a built
 * device tree. Returns the phandle, or -1 with *errp set when the node
 * is absent or carries no phandle.
 */
int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
{
    const char *nodename = "interrupt-controller";
    int offset, phandle;

    offset = fdt_subnode_offset(fdt, 0, nodename);
    if (offset < 0) {
        error_setg(errp, "Can't find node \"%s\": %s",
                   nodename, fdt_strerror(offset));
        return -1;
    }

    phandle = fdt_get_phandle(fdt, offset);
    if (!phandle) {
        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
        return -1;
    }

    return phandle;
}
/*
 * XICS legacy routines - to deprecate one day
 */

/*
 * Scan the ICS for a run of 'num' contiguous free sources whose start
 * is a multiple of 'alignnum'. Returns the first source number of the
 * block, or -1 if none exists.
 */
static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            /* Not enough room left for a block of 'num' sources */
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ics_irq_free(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            /* The whole candidate block was free */
            return first;
        }
    }

    return -1;
}
/*
 * Legacy allocator: find (but do not claim) a free block of 'num'
 * contiguous IRQs in the XICS source. With 'align', the block start is
 * aligned on 'num', which must be a power of two up to 32 (multi-vector
 * MSI requirement). Returns the first global IRQ number, or -1 with
 * *errp set.
 */
int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMesage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    /* Translate the ICS source number to a global IRQ number */
    return first + ics->offset;
}
#define SPAPR_IRQ_XICS_LEGACY_NR_XIRQS     0x400

/*
 * Legacy XICS-only backend descriptor kept for older pseries machine
 * types: smaller IRQ space and every IRQ usable as an MSI.
 */
SpaprIrq spapr_irq_xics_legacy = {
    .nr_xirqs    = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .nr_msis     = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY, /* CAS: XICS only */

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .init_kvm    = spapr_irq_init_kvm_xics,
};