2 * QEMU PowerPC sPAPR IRQ interface
4 * Copyright (c) 2018, IBM Corporation.
6 * This code is licensed under the GPL version 2 or later. See the
7 * COPYING file in the top-level directory.
10 #include "qemu/osdep.h"
12 #include "qemu/error-report.h"
13 #include "qapi/error.h"
14 #include "hw/ppc/spapr.h"
15 #include "hw/ppc/spapr_xive.h"
16 #include "hw/ppc/xics.h"
17 #include "sysemu/kvm.h"
21 void spapr_irq_msi_init(sPAPRMachineState
*spapr
, uint32_t nr_msis
)
23 spapr
->irq_map_nr
= nr_msis
;
24 spapr
->irq_map
= bitmap_new(spapr
->irq_map_nr
);
27 int spapr_irq_msi_alloc(sPAPRMachineState
*spapr
, uint32_t num
, bool align
,
33 * The 'align_mask' parameter of bitmap_find_next_zero_area()
34 * should be one less than a power of 2; 0 means no
35 * alignment. Adapt the 'align' value of the former allocator
36 * to fit the requirements of bitmap_find_next_zero_area()
40 irq
= bitmap_find_next_zero_area(spapr
->irq_map
, spapr
->irq_map_nr
, 0, num
,
42 if (irq
== spapr
->irq_map_nr
) {
43 error_setg(errp
, "can't find a free %d-IRQ block", num
);
47 bitmap_set(spapr
->irq_map
, irq
, num
);
49 return irq
+ SPAPR_IRQ_MSI
;
52 void spapr_irq_msi_free(sPAPRMachineState
*spapr
, int irq
, uint32_t num
)
54 bitmap_clear(spapr
->irq_map
, irq
- SPAPR_IRQ_MSI
, num
);
57 void spapr_irq_msi_reset(sPAPRMachineState
*spapr
)
59 bitmap_clear(spapr
->irq_map
, 0, spapr
->irq_map_nr
);
67 static ICSState
*spapr_ics_create(sPAPRMachineState
*spapr
,
69 int nr_irqs
, Error
**errp
)
71 Error
*local_err
= NULL
;
74 obj
= object_new(type_ics
);
75 object_property_add_child(OBJECT(spapr
), "ics", obj
, &error_abort
);
76 object_property_add_const_link(obj
, ICS_PROP_XICS
, OBJECT(spapr
),
78 object_property_set_int(obj
, nr_irqs
, "nr-irqs", &local_err
);
82 object_property_set_bool(obj
, true, "realized", &local_err
);
90 error_propagate(errp
, local_err
);
94 static void spapr_irq_init_xics(sPAPRMachineState
*spapr
, Error
**errp
)
96 MachineState
*machine
= MACHINE(spapr
);
97 int nr_irqs
= spapr
->irq
->nr_irqs
;
98 Error
*local_err
= NULL
;
101 if (machine_kernel_irqchip_allowed(machine
) &&
102 !xics_kvm_init(spapr
, &local_err
)) {
103 spapr
->icp_type
= TYPE_KVM_ICP
;
104 spapr
->ics
= spapr_ics_create(spapr
, TYPE_ICS_KVM
, nr_irqs
,
107 if (machine_kernel_irqchip_required(machine
) && !spapr
->ics
) {
108 error_prepend(&local_err
,
109 "kernel_irqchip requested but unavailable: ");
112 error_free(local_err
);
117 xics_spapr_init(spapr
);
118 spapr
->icp_type
= TYPE_ICP
;
119 spapr
->ics
= spapr_ics_create(spapr
, TYPE_ICS_SIMPLE
, nr_irqs
,
124 error_propagate(errp
, local_err
);
/* True when source number 'srcno' of 'ics' has no IRQ flags set, i.e. is unclaimed */
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
130 static int spapr_irq_claim_xics(sPAPRMachineState
*spapr
, int irq
, bool lsi
,
133 ICSState
*ics
= spapr
->ics
;
137 if (!ics_valid_irq(ics
, irq
)) {
138 error_setg(errp
, "IRQ %d is invalid", irq
);
142 if (!ICS_IRQ_FREE(ics
, irq
- ics
->offset
)) {
143 error_setg(errp
, "IRQ %d is not free", irq
);
147 ics_set_irq_type(ics
, irq
- ics
->offset
, lsi
);
151 static void spapr_irq_free_xics(sPAPRMachineState
*spapr
, int irq
, int num
)
153 ICSState
*ics
= spapr
->ics
;
154 uint32_t srcno
= irq
- ics
->offset
;
157 if (ics_valid_irq(ics
, irq
)) {
158 trace_spapr_irq_free(0, irq
, num
);
159 for (i
= srcno
; i
< srcno
+ num
; ++i
) {
160 if (ICS_IRQ_FREE(ics
, i
)) {
161 trace_spapr_irq_free_warn(0, i
);
163 memset(&ics
->irqs
[i
], 0, sizeof(ICSIRQState
));
168 static qemu_irq
spapr_qirq_xics(sPAPRMachineState
*spapr
, int irq
)
170 ICSState
*ics
= spapr
->ics
;
171 uint32_t srcno
= irq
- ics
->offset
;
173 if (ics_valid_irq(ics
, irq
)) {
174 return spapr
->qirqs
[srcno
];
180 static void spapr_irq_print_info_xics(sPAPRMachineState
*spapr
, Monitor
*mon
)
185 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
187 icp_pic_print_info(cpu
->icp
, mon
);
190 ics_pic_print_info(spapr
->ics
, mon
);
193 static void spapr_irq_cpu_intc_create_xics(sPAPRMachineState
*spapr
,
194 PowerPCCPU
*cpu
, Error
**errp
)
196 Error
*local_err
= NULL
;
199 obj
= icp_create(OBJECT(cpu
), spapr
->icp_type
, XICS_FABRIC(spapr
),
202 error_propagate(errp
, local_err
);
209 static int spapr_irq_post_load_xics(sPAPRMachineState
*spapr
, int version_id
)
211 if (!object_dynamic_cast(OBJECT(spapr
->ics
), TYPE_ICS_KVM
)) {
214 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
215 icp_resend(cpu
->icp
);
221 static void spapr_irq_set_irq_xics(void *opaque
, int srcno
, int val
)
223 sPAPRMachineState
*spapr
= opaque
;
224 MachineState
*machine
= MACHINE(opaque
);
226 if (kvm_enabled() && machine_kernel_irqchip_allowed(machine
)) {
227 ics_kvm_set_irq(spapr
->ics
, srcno
, val
);
229 ics_simple_set_irq(spapr
->ics
, srcno
, val
);
233 static void spapr_irq_reset_xics(sPAPRMachineState
*spapr
, Error
**errp
)
235 /* TODO: create the KVM XICS device */
238 #define SPAPR_IRQ_XICS_NR_IRQS 0x1000
239 #define SPAPR_IRQ_XICS_NR_MSIS \
240 (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)
242 sPAPRIrq spapr_irq_xics
= {
243 .nr_irqs
= SPAPR_IRQ_XICS_NR_IRQS
,
244 .nr_msis
= SPAPR_IRQ_XICS_NR_MSIS
,
245 .ov5
= SPAPR_OV5_XIVE_LEGACY
,
247 .init
= spapr_irq_init_xics
,
248 .claim
= spapr_irq_claim_xics
,
249 .free
= spapr_irq_free_xics
,
250 .qirq
= spapr_qirq_xics
,
251 .print_info
= spapr_irq_print_info_xics
,
252 .dt_populate
= spapr_dt_xics
,
253 .cpu_intc_create
= spapr_irq_cpu_intc_create_xics
,
254 .post_load
= spapr_irq_post_load_xics
,
255 .reset
= spapr_irq_reset_xics
,
256 .set_irq
= spapr_irq_set_irq_xics
,
262 static void spapr_irq_init_xive(sPAPRMachineState
*spapr
, Error
**errp
)
264 MachineState
*machine
= MACHINE(spapr
);
265 uint32_t nr_servers
= spapr_max_server_number(spapr
);
269 /* KVM XIVE device not yet available */
271 if (machine_kernel_irqchip_required(machine
)) {
272 error_setg(errp
, "kernel_irqchip requested. no KVM XIVE support");
277 dev
= qdev_create(NULL
, TYPE_SPAPR_XIVE
);
278 qdev_prop_set_uint32(dev
, "nr-irqs", spapr
->irq
->nr_irqs
);
280 * 8 XIVE END structures per CPU. One for each available priority
282 qdev_prop_set_uint32(dev
, "nr-ends", nr_servers
<< 3);
283 qdev_init_nofail(dev
);
285 spapr
->xive
= SPAPR_XIVE(dev
);
287 /* Enable the CPU IPIs */
288 for (i
= 0; i
< nr_servers
; ++i
) {
289 spapr_xive_irq_claim(spapr
->xive
, SPAPR_IRQ_IPI
+ i
, false);
292 spapr_xive_hcall_init(spapr
);
295 static int spapr_irq_claim_xive(sPAPRMachineState
*spapr
, int irq
, bool lsi
,
298 if (!spapr_xive_irq_claim(spapr
->xive
, irq
, lsi
)) {
299 error_setg(errp
, "IRQ %d is invalid", irq
);
305 static void spapr_irq_free_xive(sPAPRMachineState
*spapr
, int irq
, int num
)
309 for (i
= irq
; i
< irq
+ num
; ++i
) {
310 spapr_xive_irq_free(spapr
->xive
, i
);
314 static qemu_irq
spapr_qirq_xive(sPAPRMachineState
*spapr
, int irq
)
316 sPAPRXive
*xive
= spapr
->xive
;
318 if (irq
>= xive
->nr_irqs
) {
322 /* The sPAPR machine/device should have claimed the IRQ before */
323 assert(xive_eas_is_valid(&xive
->eat
[irq
]));
325 return spapr
->qirqs
[irq
];
328 static void spapr_irq_print_info_xive(sPAPRMachineState
*spapr
,
334 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
336 xive_tctx_pic_print_info(cpu
->tctx
, mon
);
339 spapr_xive_pic_print_info(spapr
->xive
, mon
);
342 static void spapr_irq_cpu_intc_create_xive(sPAPRMachineState
*spapr
,
343 PowerPCCPU
*cpu
, Error
**errp
)
345 Error
*local_err
= NULL
;
348 obj
= xive_tctx_create(OBJECT(cpu
), XIVE_ROUTER(spapr
->xive
), &local_err
);
350 error_propagate(errp
, local_err
);
354 cpu
->tctx
= XIVE_TCTX(obj
);
357 * (TCG) Early setting the OS CAM line for hotplugged CPUs as they
358 * don't beneficiate from the reset of the XIVE IRQ backend
360 spapr_xive_set_tctx_os_cam(cpu
->tctx
);
363 static int spapr_irq_post_load_xive(sPAPRMachineState
*spapr
, int version_id
)
368 static void spapr_irq_reset_xive(sPAPRMachineState
*spapr
, Error
**errp
)
373 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
375 /* (TCG) Set the OS CAM line of the thread interrupt context. */
376 spapr_xive_set_tctx_os_cam(cpu
->tctx
);
380 static void spapr_irq_set_irq_xive(void *opaque
, int srcno
, int val
)
382 sPAPRMachineState
*spapr
= opaque
;
384 xive_source_set_irq(&spapr
->xive
->source
, srcno
, val
);
388 * XIVE uses the full IRQ number space. Set it to 8K to be compatible
392 #define SPAPR_IRQ_XIVE_NR_IRQS 0x2000
393 #define SPAPR_IRQ_XIVE_NR_MSIS (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI)
395 sPAPRIrq spapr_irq_xive
= {
396 .nr_irqs
= SPAPR_IRQ_XIVE_NR_IRQS
,
397 .nr_msis
= SPAPR_IRQ_XIVE_NR_MSIS
,
398 .ov5
= SPAPR_OV5_XIVE_EXPLOIT
,
400 .init
= spapr_irq_init_xive
,
401 .claim
= spapr_irq_claim_xive
,
402 .free
= spapr_irq_free_xive
,
403 .qirq
= spapr_qirq_xive
,
404 .print_info
= spapr_irq_print_info_xive
,
405 .dt_populate
= spapr_dt_xive
,
406 .cpu_intc_create
= spapr_irq_cpu_intc_create_xive
,
407 .post_load
= spapr_irq_post_load_xive
,
408 .reset
= spapr_irq_reset_xive
,
409 .set_irq
= spapr_irq_set_irq_xive
,
413 * Dual XIVE and XICS IRQ backend.
415 * Both interrupt mode, XIVE and XICS, objects are created but the
416 * machine starts in legacy interrupt mode (XICS). It can be changed
417 * by the CAS negotiation process and, in that case, the new mode is
418 * activated after an extra machine reset.
422 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
425 static sPAPRIrq
*spapr_irq_current(sPAPRMachineState
*spapr
)
427 return spapr_ovec_test(spapr
->ov5_cas
, OV5_XIVE_EXPLOIT
) ?
428 &spapr_irq_xive
: &spapr_irq_xics
;
431 static void spapr_irq_init_dual(sPAPRMachineState
*spapr
, Error
**errp
)
433 MachineState
*machine
= MACHINE(spapr
);
434 Error
*local_err
= NULL
;
436 if (kvm_enabled() && machine_kernel_irqchip_allowed(machine
)) {
437 error_setg(errp
, "No KVM support for the 'dual' machine");
441 spapr_irq_xics
.init(spapr
, &local_err
);
443 error_propagate(errp
, local_err
);
448 * Align the XICS and the XIVE IRQ number space under QEMU.
450 * However, the XICS KVM device still considers that the IRQ
451 * numbers should start at XICS_IRQ_BASE (0x1000). Either we
452 * should introduce a KVM device ioctl to set the offset or ignore
453 * the lower 4K numbers when using the get/set ioctl of the XICS
454 * KVM device. The second option seems the least intrusive.
456 spapr
->ics
->offset
= 0;
458 spapr_irq_xive
.init(spapr
, &local_err
);
460 error_propagate(errp
, local_err
);
465 static int spapr_irq_claim_dual(sPAPRMachineState
*spapr
, int irq
, bool lsi
,
468 Error
*local_err
= NULL
;
471 ret
= spapr_irq_xics
.claim(spapr
, irq
, lsi
, &local_err
);
473 error_propagate(errp
, local_err
);
477 ret
= spapr_irq_xive
.claim(spapr
, irq
, lsi
, &local_err
);
479 error_propagate(errp
, local_err
);
486 static void spapr_irq_free_dual(sPAPRMachineState
*spapr
, int irq
, int num
)
488 spapr_irq_xics
.free(spapr
, irq
, num
);
489 spapr_irq_xive
.free(spapr
, irq
, num
);
492 static qemu_irq
spapr_qirq_dual(sPAPRMachineState
*spapr
, int irq
)
494 sPAPRXive
*xive
= spapr
->xive
;
495 ICSState
*ics
= spapr
->ics
;
497 if (irq
>= spapr
->irq
->nr_irqs
) {
502 * The IRQ number should have been claimed under both interrupt
505 assert(!ICS_IRQ_FREE(ics
, irq
- ics
->offset
));
506 assert(xive_eas_is_valid(&xive
->eat
[irq
]));
508 return spapr
->qirqs
[irq
];
511 static void spapr_irq_print_info_dual(sPAPRMachineState
*spapr
, Monitor
*mon
)
513 spapr_irq_current(spapr
)->print_info(spapr
, mon
);
516 static void spapr_irq_dt_populate_dual(sPAPRMachineState
*spapr
,
517 uint32_t nr_servers
, void *fdt
,
520 spapr_irq_current(spapr
)->dt_populate(spapr
, nr_servers
, fdt
, phandle
);
523 static void spapr_irq_cpu_intc_create_dual(sPAPRMachineState
*spapr
,
524 PowerPCCPU
*cpu
, Error
**errp
)
526 Error
*local_err
= NULL
;
528 spapr_irq_xive
.cpu_intc_create(spapr
, cpu
, &local_err
);
530 error_propagate(errp
, local_err
);
534 spapr_irq_xics
.cpu_intc_create(spapr
, cpu
, errp
);
537 static int spapr_irq_post_load_dual(sPAPRMachineState
*spapr
, int version_id
)
540 * Force a reset of the XIVE backend after migration. The machine
541 * defaults to XICS at startup.
543 if (spapr_ovec_test(spapr
->ov5_cas
, OV5_XIVE_EXPLOIT
)) {
544 spapr_irq_xive
.reset(spapr
, &error_fatal
);
547 return spapr_irq_current(spapr
)->post_load(spapr
, version_id
);
550 static void spapr_irq_reset_dual(sPAPRMachineState
*spapr
, Error
**errp
)
552 spapr_irq_current(spapr
)->reset(spapr
, errp
);
555 static void spapr_irq_set_irq_dual(void *opaque
, int srcno
, int val
)
557 sPAPRMachineState
*spapr
= opaque
;
559 spapr_irq_current(spapr
)->set_irq(spapr
, srcno
, val
);
563 * Define values in sync with the XIVE and XICS backend
565 #define SPAPR_IRQ_DUAL_NR_IRQS 0x2000
566 #define SPAPR_IRQ_DUAL_NR_MSIS (SPAPR_IRQ_DUAL_NR_IRQS - SPAPR_IRQ_MSI)
568 sPAPRIrq spapr_irq_dual
= {
569 .nr_irqs
= SPAPR_IRQ_DUAL_NR_IRQS
,
570 .nr_msis
= SPAPR_IRQ_DUAL_NR_MSIS
,
571 .ov5
= SPAPR_OV5_XIVE_BOTH
,
573 .init
= spapr_irq_init_dual
,
574 .claim
= spapr_irq_claim_dual
,
575 .free
= spapr_irq_free_dual
,
576 .qirq
= spapr_qirq_dual
,
577 .print_info
= spapr_irq_print_info_dual
,
578 .dt_populate
= spapr_irq_dt_populate_dual
,
579 .cpu_intc_create
= spapr_irq_cpu_intc_create_dual
,
580 .post_load
= spapr_irq_post_load_dual
,
581 .reset
= spapr_irq_reset_dual
,
582 .set_irq
= spapr_irq_set_irq_dual
586 * sPAPR IRQ frontend routines for devices
588 void spapr_irq_init(sPAPRMachineState
*spapr
, Error
**errp
)
590 /* Initialize the MSI IRQ allocator. */
591 if (!SPAPR_MACHINE_GET_CLASS(spapr
)->legacy_irq_allocation
) {
592 spapr_irq_msi_init(spapr
, spapr
->irq
->nr_msis
);
595 spapr
->irq
->init(spapr
, errp
);
597 spapr
->qirqs
= qemu_allocate_irqs(spapr
->irq
->set_irq
, spapr
,
598 spapr
->irq
->nr_irqs
);
601 int spapr_irq_claim(sPAPRMachineState
*spapr
, int irq
, bool lsi
, Error
**errp
)
603 return spapr
->irq
->claim(spapr
, irq
, lsi
, errp
);
606 void spapr_irq_free(sPAPRMachineState
*spapr
, int irq
, int num
)
608 spapr
->irq
->free(spapr
, irq
, num
);
611 qemu_irq
spapr_qirq(sPAPRMachineState
*spapr
, int irq
)
613 return spapr
->irq
->qirq(spapr
, irq
);
616 int spapr_irq_post_load(sPAPRMachineState
*spapr
, int version_id
)
618 return spapr
->irq
->post_load(spapr
, version_id
);
621 void spapr_irq_reset(sPAPRMachineState
*spapr
, Error
**errp
)
623 if (spapr
->irq
->reset
) {
624 spapr
->irq
->reset(spapr
, errp
);
629 * XICS legacy routines - to deprecate one day
632 static int ics_find_free_block(ICSState
*ics
, int num
, int alignnum
)
636 for (first
= 0; first
< ics
->nr_irqs
; first
+= alignnum
) {
637 if (num
> (ics
->nr_irqs
- first
)) {
640 for (i
= first
; i
< first
+ num
; ++i
) {
641 if (!ICS_IRQ_FREE(ics
, i
)) {
645 if (i
== (first
+ num
)) {
653 int spapr_irq_find(sPAPRMachineState
*spapr
, int num
, bool align
, Error
**errp
)
655 ICSState
*ics
= spapr
->ics
;
661 * MSIMesage::data is used for storing VIRQ so
662 * it has to be aligned to num to support multiple
663 * MSI vectors. MSI-X is not affected by this.
664 * The hint is used for the first IRQ, the rest should
665 * be allocated continuously.
668 assert((num
== 1) || (num
== 2) || (num
== 4) ||
669 (num
== 8) || (num
== 16) || (num
== 32));
670 first
= ics_find_free_block(ics
, num
, num
);
672 first
= ics_find_free_block(ics
, num
, 1);
676 error_setg(errp
, "can't find a free %d-IRQ block", num
);
680 return first
+ ics
->offset
;
683 #define SPAPR_IRQ_XICS_LEGACY_NR_IRQS 0x400
685 sPAPRIrq spapr_irq_xics_legacy
= {
686 .nr_irqs
= SPAPR_IRQ_XICS_LEGACY_NR_IRQS
,
687 .nr_msis
= SPAPR_IRQ_XICS_LEGACY_NR_IRQS
,
688 .ov5
= SPAPR_OV5_XIVE_LEGACY
,
690 .init
= spapr_irq_init_xics
,
691 .claim
= spapr_irq_claim_xics
,
692 .free
= spapr_irq_free_xics
,
693 .qirq
= spapr_qirq_xics
,
694 .print_info
= spapr_irq_print_info_xics
,
695 .dt_populate
= spapr_dt_xics
,
696 .cpu_intc_create
= spapr_irq_cpu_intc_create_xics
,
697 .post_load
= spapr_irq_post_load_xics
,
698 .set_irq
= spapr_irq_set_irq_xics
,