/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/xics.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/visitor.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/intc/intc.h"
#include "hw/irq.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
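
/*
 * The XICS model is split in two halves: the ICS (interrupt source
 * controller) owns the per-interrupt state (server, priority, LSI/MSI
 * type and status bits), while each vCPU has an ICP (interrupt
 * presentation controller) holding its XIRR and MFRR registers and
 * driving the CPU's external interrupt input.  The XICSFabric interface
 * implemented by the machine lets the two layers find each other.
 */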

void icp_pic_print_info(ICPState *icp, Monitor *mon)
{
    int cpu_index;

    /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
     * are hot plugged or unplugged.
     */
    if (!icp) {
        return;
    }

    cpu_index = icp->cs ? icp->cs->cpu_index : -1;

    if (kvm_irqchip_in_kernel()) {
        icp_synchronize_state(icp);
    }

    monitor_printf(mon, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n",
                   cpu_index, icp->xirr, icp->xirr_owner,
                   icp->pending_priority, icp->mfrr);
}

void ics_pic_print_info(ICSState *ics, Monitor *mon)
{
    uint32_t i;

    monitor_printf(mon, "ICS %4x..%4x %p\n",
                   ics->offset, ics->offset + ics->nr_irqs - 1, ics);

    if (!ics->irqs) {
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        ics_synchronize_state(ics);
    }

    for (i = 0; i < ics->nr_irqs; i++) {
        ICSIRQState *irq = ics->irqs + i;

        if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) {
            continue;
        }
        monitor_printf(mon, "  %4x %s %02x %02x\n",
                       ics->offset + i,
                       (irq->flags & XICS_FLAGS_IRQ_LSI) ?
                       "LSI" : "MSI",
                       irq->priority, irq->status);
    }
}

/*
 * ICP: Presentation layer
 */

#define XISR_MASK  0x00ffffff
#define CPPR_MASK  0xff000000

#define XISR(icp)   (((icp)->xirr) & XISR_MASK)
#define CPPR(icp)   (((icp)->xirr) >> 24)
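
/*
 * XIRR layout: the most significant byte is the CPPR (current processor
 * priority), the low 24 bits are the XISR (source number of the pending
 * interrupt).  Numerically lower priority values are more favored; 0xff
 * is least favored (a CPPR of 0xff accepts any interrupt, a pending
 * priority of 0xff means nothing is pending).
 */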

static void ics_reject(ICSState *ics, uint32_t nr);
static void ics_eoi(ICSState *ics, uint32_t nr);

static void icp_check_ipi(ICPState *icp)
{
    if (XISR(icp) && (icp->pending_priority <= icp->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(icp->cs->cpu_index, icp->mfrr);

    if (XISR(icp) && icp->xirr_owner) {
        ics_reject(icp->xirr_owner, XISR(icp));
    }

    icp->xirr = (icp->xirr & ~XISR_MASK) | XICS_IPI;
    icp->pending_priority = icp->mfrr;
    icp->xirr_owner = NULL;
    qemu_irq_raise(icp->output);
}

void icp_resend(ICPState *icp)
{
    XICSFabric *xi = icp->xics;
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);

    if (icp->mfrr < CPPR(icp)) {
        icp_check_ipi(icp);
    }

    xic->ics_resend(xi);
}

void icp_set_cppr(ICPState *icp, uint8_t cppr)
{
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(icp);
    icp->xirr = (icp->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(icp) && (cppr <= icp->pending_priority)) {
            old_xisr = XISR(icp);
            icp->xirr &= ~XISR_MASK; /* Clear XISR */
            icp->pending_priority = 0xff;
            qemu_irq_lower(icp->output);
            if (icp->xirr_owner) {
                ics_reject(icp->xirr_owner, old_xisr);
                icp->xirr_owner = NULL;
            }
        }
    } else {
        if (!XISR(icp)) {
            icp_resend(icp);
        }
    }
}

void icp_set_mfrr(ICPState *icp, uint8_t mfrr)
{
    icp->mfrr = mfrr;

    if (mfrr < CPPR(icp)) {
        icp_check_ipi(icp);
    }
}
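
/*
 * icp_accept() models the guest reading XIRR to take an interrupt: it
 * returns the current XIRR value, moves the pending priority into the
 * CPPR (so less favored sources cannot preempt the handler), clears the
 * XISR and lowers the CPU interrupt line.
 */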
uint32_t icp_accept(ICPState *icp)
{
    uint32_t xirr = icp->xirr;

    qemu_irq_lower(icp->output);
    icp->xirr = icp->pending_priority << 24;
    icp->pending_priority = 0xff;
    icp->xirr_owner = NULL;

    trace_xics_icp_accept(xirr, icp->xirr);

    return xirr;
}

uint32_t icp_ipoll(ICPState *icp, uint32_t *mfrr)
{
    if (mfrr) {
        *mfrr = icp->mfrr;
    }
    return icp->xirr;
}

void icp_eoi(ICPState *icp, uint32_t xirr)
{
    XICSFabric *xi = icp->xics;
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
    ICSState *ics;
    uint32_t irq;

    /* Send EOI -> ICS */
    icp->xirr = (icp->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(icp->cs->cpu_index, xirr, icp->xirr);
    irq = xirr & XISR_MASK;

    ics = xic->ics_get(xi, irq);
    if (ics) {
        ics_eoi(ics, irq);
    }
    if (!XISR(icp)) {
        icp_resend(icp);
    }
}
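
/*
 * Present interrupt 'nr' from an ICS to the ICP of 'server'.  If the
 * presenter is already busy with something at least as favored (its CPPR
 * or an already pending interrupt), the interrupt is bounced back to the
 * source with ics_reject(); otherwise it becomes the pending interrupt
 * and the CPU's external interrupt line is raised.
 */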
void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
{
    ICPState *icp = xics_icp_get(ics->xics, server);

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(icp))
        || (XISR(icp) && (icp->pending_priority <= priority))) {
        ics_reject(ics, nr);
    } else {
        if (XISR(icp) && icp->xirr_owner) {
            ics_reject(icp->xirr_owner, XISR(icp));
            icp->xirr_owner = NULL;
        }
        icp->xirr = (icp->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        icp->xirr_owner = ics;
        icp->pending_priority = priority;
        trace_xics_icp_raise(icp->xirr, icp->pending_priority);
        qemu_irq_raise(icp->output);
    }
}

static int icp_pre_save(void *opaque)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        icp_get_kvm_state(icp);
    }

    return 0;
}

static int icp_post_load(void *opaque, int version_id)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;
        int ret;

        ret = icp_set_kvm_state(icp, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_pre_save,
    .post_load = icp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

void icp_reset(ICPState *icp)
{
    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        icp_set_kvm_state(icp, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}
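
/*
 * Realizing an ICP wires its 'output' qemu_irq to the external interrupt
 * input of the owning CPU; which GPIO line is used depends on the CPU's
 * input model (POWER7-style, POWER9 for emulated sPAPR XICS, or 970).
 */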
static void icp_realize(DeviceState *dev, Error **errp)
{
    ICPState *icp = ICP(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Error *err = NULL;

    assert(icp->xics);
    assert(icp->cs);

    cpu = POWERPC_CPU(icp->cs);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER7_INPUT_INT);
        break;

    case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */
        icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT);
        break;

    case PPC_FLAGS_INPUT_970:
        icp->output = qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_INT);
        break;

    default:
        error_setg(errp,
                   "XICS interrupt controller does not support this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (kvm_irqchip_in_kernel()) {
        icp_kvm_realize(dev, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }

    vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
}

static void icp_unrealize(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    vmstate_unregister(NULL, &vmstate_icp_server, icp);
}

static Property icp_properties[] = {
    DEFINE_PROP_LINK(ICP_PROP_XICS, ICPState, xics, TYPE_XICS_FABRIC,
                     XICSFabric *),
    DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *),
    DEFINE_PROP_END_OF_LIST(),
};

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = icp_realize;
    dc->unrealize = icp_unrealize;
    device_class_set_props(dc, icp_properties);
    /*
     * Reason: part of XICS interrupt controller, needs to be wired up
     * by icp_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

Object *icp_create(Object *cpu, const char *type, XICSFabric *xi, Error **errp)
{
    Object *obj;

    obj = object_new(type);
    object_property_add_child(cpu, type, obj);
    object_unref(obj);
    object_property_set_link(obj, ICP_PROP_XICS, OBJECT(xi), &error_abort);
    object_property_set_link(obj, ICP_PROP_CPU, cpu, &error_abort);
    if (!qdev_realize(DEVICE(obj), NULL, errp)) {
        object_unparent(obj);
        obj = NULL;
    }

    return obj;
}

void icp_destroy(ICPState *icp)
{
    Object *obj = OBJECT(icp);

    object_unparent(obj);
}

/*
 * ICS: Source layer
 */

static void ics_resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void ics_resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void ics_set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void ics_set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    ics_resend_lsi(ics, srcno);
}
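
/*
 * LSIs are level-sensitive: XICS_STATUS_ASSERTED tracks the input line
 * and XICS_STATUS_SENT ensures a single presentation per assertion until
 * EOI.  MSIs are message-signalled (edge-like): a trigger while masked
 * (priority 0xff) is latched as XICS_STATUS_MASKED_PENDING and delivered
 * later when a valid priority is set through ics_write_xive().
 */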
void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_kvm_set_irq(ics, srcno, val);
        return;
    }

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        ics_set_irq_lsi(ics, srcno, val);
    } else {
        ics_set_irq_msi(ics, srcno, val);
    }
}

static void ics_write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
}

static void ics_write_xive_lsi(ICSState *ics, int srcno)
{
    ics_resend_lsi(ics, srcno);
}

void ics_write_xive(ICSState *ics, int srcno, int server,
                    uint8_t priority, uint8_t saved_priority)
{
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(ics->offset + srcno, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        ics_write_xive_lsi(ics, srcno);
    } else {
        ics_write_xive_msi(ics, srcno);
    }
}
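
/*
 * An interrupt ends up here when the target ICP could not take it (see
 * icp_irq()).  MSIs are marked XICS_STATUS_REJECTED so that a later
 * ics_resend() retries them; LSIs just drop XICS_STATUS_SENT so the
 * still-asserted level gets re-presented.  A subclass can override this
 * behaviour through the ->reject hook.
 */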
static void ics_reject(ICSState *ics, uint32_t nr)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    if (isc->reject) {
        isc->reject(ics, nr);
        return;
    }

    trace_xics_ics_reject(nr, nr - ics->offset);
    if (irq->flags & XICS_FLAGS_IRQ_MSI) {
        irq->status |= XICS_STATUS_REJECTED;
    } else if (irq->flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

void ics_resend(ICSState *ics)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);
    int i;

    if (isc->resend) {
        isc->resend(ics);
        return;
    }

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            ics_resend_lsi(ics, i);
        } else {
            ics_resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, uint32_t nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset_irq(ICSIRQState *irq)
{
    irq->priority = 0xff;
    irq->saved_priority = 0xff;
}
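
/*
 * Reset clears all per-source state except the LSI/MSI type flags, which
 * are assigned once by the machine through ics_set_irq_type() and must
 * survive a device reset; they are saved and restored around the memset.
 */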
static void ics_reset_hold(Object *obj)
{
    ICSState *ics = ICS(obj);
    g_autofree uint8_t *flags = g_malloc(ics->nr_irqs);
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics_reset_irq(ics->irqs + i);
        ics->irqs[i].flags = flags[i];
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        ics_set_kvm_state(ics, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

static void ics_reset_handler(void *dev)
{
    device_cold_reset(dev);
}

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    assert(ics->xics);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }
    ics->irqs = g_new0(ICSIRQState, ics->nr_irqs);

    qemu_register_reset(ics_reset_handler, ics);
}

static void ics_instance_init(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}

static int ics_pre_save(void *opaque)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_get_kvm_state(ics);
    }

    return 0;
}

static int ics_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;
        int ret;

        ret = ics_set_kvm_state(ics, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}
static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_pre_save,
    .post_load = ics_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq,
                                             ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static Property ics_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0),
    DEFINE_PROP_LINK(ICS_PROP_XICS, ICSState, xics, TYPE_XICS_FABRIC,
                     XICSFabric *),
    DEFINE_PROP_END_OF_LIST(),
};

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    dc->realize = ics_realize;
    device_class_set_props(dc, ics_properties);
    dc->vmsd = &vmstate_ics;
    /*
     * Reason: part of XICS interrupt controller, needs to be wired up,
     * e.g. by spapr_irq_init().
     */
    dc->user_creatable = false;
    rc->phases.hold = ics_reset_hold;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .instance_init = ics_instance_init,
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
};

static const TypeInfo xics_fabric_info = {
    .name = TYPE_XICS_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XICSFabricClass),
};
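
/*
 * XICSFabric is an interface implemented by the machine (e.g. sPAPR or
 * PowerNV); it provides the icp_get(), ics_get() and ics_resend() hooks
 * used in this file to route interrupts between the ICS and ICP layers.
 */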

/*
 * Exported functions
 */
ICPState *xics_icp_get(XICSFabric *xi, int server)
{
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);

    return xic->icp_get(xi, server);
}

void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        ics_reset_irq(ics->irqs + srcno);
        ics_set_kvm_state_one(ics, srcno, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

static void xics_register_types(void)
{
    type_register_static(&ics_info);
    type_register_static(&icp_info);
    type_register_static(&xics_fabric_info);
}

type_init(xics_register_types)