 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"
40 static int get_cpu_index_by_dt_id(int cpu_dt_id
)
42 PowerPCCPU
*cpu
= ppc_get_vcpu_by_dt_id(cpu_dt_id
);
45 return cpu
->parent_obj
.cpu_index
;
51 void xics_cpu_destroy(XICSState
*icp
, PowerPCCPU
*cpu
)
53 CPUState
*cs
= CPU(cpu
);
54 ICPState
*ss
= &icp
->ss
[cs
->cpu_index
];
56 assert(cs
->cpu_index
< icp
->nr_servers
);
63 void xics_cpu_setup(XICSState
*icp
, PowerPCCPU
*cpu
)
65 CPUState
*cs
= CPU(cpu
);
66 CPUPPCState
*env
= &cpu
->env
;
67 ICPState
*ss
= &icp
->ss
[cs
->cpu_index
];
68 XICSStateClass
*info
= XICS_COMMON_GET_CLASS(icp
);
70 assert(cs
->cpu_index
< icp
->nr_servers
);
74 if (info
->cpu_setup
) {
75 info
->cpu_setup(icp
, cpu
);
78 switch (PPC_INPUT(env
)) {
79 case PPC_FLAGS_INPUT_POWER7
:
80 ss
->output
= env
->irq_inputs
[POWER7_INPUT_INT
];
83 case PPC_FLAGS_INPUT_970
:
84 ss
->output
= env
->irq_inputs
[PPC970_INPUT_INT
];
88 error_report("XICS interrupt controller does not support this CPU "
95 * XICS Common class - parent for emulated XICS and KVM-XICS
97 static void xics_common_reset(DeviceState
*d
)
99 XICSState
*icp
= XICS_COMMON(d
);
102 for (i
= 0; i
< icp
->nr_servers
; i
++) {
103 device_reset(DEVICE(&icp
->ss
[i
]));
106 device_reset(DEVICE(icp
->ics
));
109 static void xics_prop_get_nr_irqs(Object
*obj
, Visitor
*v
, const char *name
,
110 void *opaque
, Error
**errp
)
112 XICSState
*icp
= XICS_COMMON(obj
);
113 int64_t value
= icp
->nr_irqs
;
115 visit_type_int(v
, name
, &value
, errp
);
118 static void xics_prop_set_nr_irqs(Object
*obj
, Visitor
*v
, const char *name
,
119 void *opaque
, Error
**errp
)
121 XICSState
*icp
= XICS_COMMON(obj
);
122 XICSStateClass
*info
= XICS_COMMON_GET_CLASS(icp
);
126 visit_type_int(v
, name
, &value
, &error
);
128 error_propagate(errp
, error
);
132 error_setg(errp
, "Number of interrupts is already set to %u",
137 assert(info
->set_nr_irqs
);
139 info
->set_nr_irqs(icp
, value
, errp
);
142 static void xics_prop_get_nr_servers(Object
*obj
, Visitor
*v
,
143 const char *name
, void *opaque
,
146 XICSState
*icp
= XICS_COMMON(obj
);
147 int64_t value
= icp
->nr_servers
;
149 visit_type_int(v
, name
, &value
, errp
);
152 static void xics_prop_set_nr_servers(Object
*obj
, Visitor
*v
,
153 const char *name
, void *opaque
,
156 XICSState
*icp
= XICS_COMMON(obj
);
157 XICSStateClass
*info
= XICS_COMMON_GET_CLASS(icp
);
161 visit_type_int(v
, name
, &value
, &error
);
163 error_propagate(errp
, error
);
166 if (icp
->nr_servers
) {
167 error_setg(errp
, "Number of servers is already set to %u",
172 assert(info
->set_nr_servers
);
173 info
->set_nr_servers(icp
, value
, errp
);
176 static void xics_common_initfn(Object
*obj
)
178 object_property_add(obj
, "nr_irqs", "int",
179 xics_prop_get_nr_irqs
, xics_prop_set_nr_irqs
,
181 object_property_add(obj
, "nr_servers", "int",
182 xics_prop_get_nr_servers
, xics_prop_set_nr_servers
,
186 static void xics_common_class_init(ObjectClass
*oc
, void *data
)
188 DeviceClass
*dc
= DEVICE_CLASS(oc
);
190 dc
->reset
= xics_common_reset
;
193 static const TypeInfo xics_common_info
= {
194 .name
= TYPE_XICS_COMMON
,
195 .parent
= TYPE_SYS_BUS_DEVICE
,
196 .instance_size
= sizeof(XICSState
),
197 .class_size
= sizeof(XICSStateClass
),
198 .instance_init
= xics_common_initfn
,
199 .class_init
= xics_common_class_init
,
203 * ICP: Presentation layer
206 #define XISR_MASK 0x00ffffff
207 #define CPPR_MASK 0xff000000
209 #define XISR(ss) (((ss)->xirr) & XISR_MASK)
210 #define CPPR(ss) (((ss)->xirr) >> 24)
212 static void ics_reject(ICSState
*ics
, int nr
);
213 static void ics_resend(ICSState
*ics
);
214 static void ics_eoi(ICSState
*ics
, int nr
);
216 static void icp_check_ipi(XICSState
*icp
, int server
)
218 ICPState
*ss
= icp
->ss
+ server
;
220 if (XISR(ss
) && (ss
->pending_priority
<= ss
->mfrr
)) {
224 trace_xics_icp_check_ipi(server
, ss
->mfrr
);
227 ics_reject(icp
->ics
, XISR(ss
));
230 ss
->xirr
= (ss
->xirr
& ~XISR_MASK
) | XICS_IPI
;
231 ss
->pending_priority
= ss
->mfrr
;
232 qemu_irq_raise(ss
->output
);
235 static void icp_resend(XICSState
*icp
, int server
)
237 ICPState
*ss
= icp
->ss
+ server
;
239 if (ss
->mfrr
< CPPR(ss
)) {
240 icp_check_ipi(icp
, server
);
242 ics_resend(icp
->ics
);
245 static void icp_set_cppr(XICSState
*icp
, int server
, uint8_t cppr
)
247 ICPState
*ss
= icp
->ss
+ server
;
252 ss
->xirr
= (ss
->xirr
& ~CPPR_MASK
) | (cppr
<< 24);
254 if (cppr
< old_cppr
) {
255 if (XISR(ss
) && (cppr
<= ss
->pending_priority
)) {
257 ss
->xirr
&= ~XISR_MASK
; /* Clear XISR */
258 ss
->pending_priority
= 0xff;
259 qemu_irq_lower(ss
->output
);
260 ics_reject(icp
->ics
, old_xisr
);
264 icp_resend(icp
, server
);
269 static void icp_set_mfrr(XICSState
*icp
, int server
, uint8_t mfrr
)
271 ICPState
*ss
= icp
->ss
+ server
;
274 if (mfrr
< CPPR(ss
)) {
275 icp_check_ipi(icp
, server
);
279 static uint32_t icp_accept(ICPState
*ss
)
281 uint32_t xirr
= ss
->xirr
;
283 qemu_irq_lower(ss
->output
);
284 ss
->xirr
= ss
->pending_priority
<< 24;
285 ss
->pending_priority
= 0xff;
287 trace_xics_icp_accept(xirr
, ss
->xirr
);
292 static void icp_eoi(XICSState
*icp
, int server
, uint32_t xirr
)
294 ICPState
*ss
= icp
->ss
+ server
;
296 /* Send EOI -> ICS */
297 ss
->xirr
= (ss
->xirr
& ~CPPR_MASK
) | (xirr
& CPPR_MASK
);
298 trace_xics_icp_eoi(server
, xirr
, ss
->xirr
);
299 ics_eoi(icp
->ics
, xirr
& XISR_MASK
);
301 icp_resend(icp
, server
);
305 static void icp_irq(XICSState
*icp
, int server
, int nr
, uint8_t priority
)
307 ICPState
*ss
= icp
->ss
+ server
;
309 trace_xics_icp_irq(server
, nr
, priority
);
311 if ((priority
>= CPPR(ss
))
312 || (XISR(ss
) && (ss
->pending_priority
<= priority
))) {
313 ics_reject(icp
->ics
, nr
);
316 ics_reject(icp
->ics
, XISR(ss
));
318 ss
->xirr
= (ss
->xirr
& ~XISR_MASK
) | (nr
& XISR_MASK
);
319 ss
->pending_priority
= priority
;
320 trace_xics_icp_raise(ss
->xirr
, ss
->pending_priority
);
321 qemu_irq_raise(ss
->output
);
325 static void icp_dispatch_pre_save(void *opaque
)
327 ICPState
*ss
= opaque
;
328 ICPStateClass
*info
= ICP_GET_CLASS(ss
);
330 if (info
->pre_save
) {
335 static int icp_dispatch_post_load(void *opaque
, int version_id
)
337 ICPState
*ss
= opaque
;
338 ICPStateClass
*info
= ICP_GET_CLASS(ss
);
340 if (info
->post_load
) {
341 return info
->post_load(ss
, version_id
);
347 static const VMStateDescription vmstate_icp_server
= {
348 .name
= "icp/server",
350 .minimum_version_id
= 1,
351 .pre_save
= icp_dispatch_pre_save
,
352 .post_load
= icp_dispatch_post_load
,
353 .fields
= (VMStateField
[]) {
355 VMSTATE_UINT32(xirr
, ICPState
),
356 VMSTATE_UINT8(pending_priority
, ICPState
),
357 VMSTATE_UINT8(mfrr
, ICPState
),
358 VMSTATE_END_OF_LIST()
362 static void icp_reset(DeviceState
*dev
)
364 ICPState
*icp
= ICP(dev
);
367 icp
->pending_priority
= 0xff;
370 /* Make all outputs are deasserted */
371 qemu_set_irq(icp
->output
, 0);
374 static void icp_class_init(ObjectClass
*klass
, void *data
)
376 DeviceClass
*dc
= DEVICE_CLASS(klass
);
378 dc
->reset
= icp_reset
;
379 dc
->vmsd
= &vmstate_icp_server
;
382 static const TypeInfo icp_info
= {
384 .parent
= TYPE_DEVICE
,
385 .instance_size
= sizeof(ICPState
),
386 .class_init
= icp_class_init
,
387 .class_size
= sizeof(ICPStateClass
),
393 static int ics_valid_irq(ICSState
*ics
, uint32_t nr
)
395 return (nr
>= ics
->offset
)
396 && (nr
< (ics
->offset
+ ics
->nr_irqs
));
399 static void resend_msi(ICSState
*ics
, int srcno
)
401 ICSIRQState
*irq
= ics
->irqs
+ srcno
;
403 /* FIXME: filter by server#? */
404 if (irq
->status
& XICS_STATUS_REJECTED
) {
405 irq
->status
&= ~XICS_STATUS_REJECTED
;
406 if (irq
->priority
!= 0xff) {
407 icp_irq(ics
->icp
, irq
->server
, srcno
+ ics
->offset
,
413 static void resend_lsi(ICSState
*ics
, int srcno
)
415 ICSIRQState
*irq
= ics
->irqs
+ srcno
;
417 if ((irq
->priority
!= 0xff)
418 && (irq
->status
& XICS_STATUS_ASSERTED
)
419 && !(irq
->status
& XICS_STATUS_SENT
)) {
420 irq
->status
|= XICS_STATUS_SENT
;
421 icp_irq(ics
->icp
, irq
->server
, srcno
+ ics
->offset
, irq
->priority
);
425 static void set_irq_msi(ICSState
*ics
, int srcno
, int val
)
427 ICSIRQState
*irq
= ics
->irqs
+ srcno
;
429 trace_xics_set_irq_msi(srcno
, srcno
+ ics
->offset
);
432 if (irq
->priority
== 0xff) {
433 irq
->status
|= XICS_STATUS_MASKED_PENDING
;
434 trace_xics_masked_pending();
436 icp_irq(ics
->icp
, irq
->server
, srcno
+ ics
->offset
, irq
->priority
);
441 static void set_irq_lsi(ICSState
*ics
, int srcno
, int val
)
443 ICSIRQState
*irq
= ics
->irqs
+ srcno
;
445 trace_xics_set_irq_lsi(srcno
, srcno
+ ics
->offset
);
447 irq
->status
|= XICS_STATUS_ASSERTED
;
449 irq
->status
&= ~XICS_STATUS_ASSERTED
;
451 resend_lsi(ics
, srcno
);
454 static void ics_set_irq(void *opaque
, int srcno
, int val
)
456 ICSState
*ics
= (ICSState
*)opaque
;
458 if (ics
->irqs
[srcno
].flags
& XICS_FLAGS_IRQ_LSI
) {
459 set_irq_lsi(ics
, srcno
, val
);
461 set_irq_msi(ics
, srcno
, val
);
465 static void write_xive_msi(ICSState
*ics
, int srcno
)
467 ICSIRQState
*irq
= ics
->irqs
+ srcno
;
469 if (!(irq
->status
& XICS_STATUS_MASKED_PENDING
)
470 || (irq
->priority
== 0xff)) {
474 irq
->status
&= ~XICS_STATUS_MASKED_PENDING
;
475 icp_irq(ics
->icp
, irq
->server
, srcno
+ ics
->offset
, irq
->priority
);
478 static void write_xive_lsi(ICSState
*ics
, int srcno
)
480 resend_lsi(ics
, srcno
);
483 static void ics_write_xive(ICSState
*ics
, int nr
, int server
,
484 uint8_t priority
, uint8_t saved_priority
)
486 int srcno
= nr
- ics
->offset
;
487 ICSIRQState
*irq
= ics
->irqs
+ srcno
;
489 irq
->server
= server
;
490 irq
->priority
= priority
;
491 irq
->saved_priority
= saved_priority
;
493 trace_xics_ics_write_xive(nr
, srcno
, server
, priority
);
495 if (ics
->irqs
[srcno
].flags
& XICS_FLAGS_IRQ_LSI
) {
496 write_xive_lsi(ics
, srcno
);
498 write_xive_msi(ics
, srcno
);
502 static void ics_reject(ICSState
*ics
, int nr
)
504 ICSIRQState
*irq
= ics
->irqs
+ nr
- ics
->offset
;
506 trace_xics_ics_reject(nr
, nr
- ics
->offset
);
507 irq
->status
|= XICS_STATUS_REJECTED
; /* Irrelevant but harmless for LSI */
508 irq
->status
&= ~XICS_STATUS_SENT
; /* Irrelevant but harmless for MSI */
511 static void ics_resend(ICSState
*ics
)
515 for (i
= 0; i
< ics
->nr_irqs
; i
++) {
516 /* FIXME: filter by server#? */
517 if (ics
->irqs
[i
].flags
& XICS_FLAGS_IRQ_LSI
) {
525 static void ics_eoi(ICSState
*ics
, int nr
)
527 int srcno
= nr
- ics
->offset
;
528 ICSIRQState
*irq
= ics
->irqs
+ srcno
;
530 trace_xics_ics_eoi(nr
);
532 if (ics
->irqs
[srcno
].flags
& XICS_FLAGS_IRQ_LSI
) {
533 irq
->status
&= ~XICS_STATUS_SENT
;
537 static void ics_reset(DeviceState
*dev
)
539 ICSState
*ics
= ICS(dev
);
541 uint8_t flags
[ics
->nr_irqs
];
543 for (i
= 0; i
< ics
->nr_irqs
; i
++) {
544 flags
[i
] = ics
->irqs
[i
].flags
;
547 memset(ics
->irqs
, 0, sizeof(ICSIRQState
) * ics
->nr_irqs
);
549 for (i
= 0; i
< ics
->nr_irqs
; i
++) {
550 ics
->irqs
[i
].priority
= 0xff;
551 ics
->irqs
[i
].saved_priority
= 0xff;
552 ics
->irqs
[i
].flags
= flags
[i
];
556 static int ics_post_load(ICSState
*ics
, int version_id
)
560 for (i
= 0; i
< ics
->icp
->nr_servers
; i
++) {
561 icp_resend(ics
->icp
, i
);
567 static void ics_dispatch_pre_save(void *opaque
)
569 ICSState
*ics
= opaque
;
570 ICSStateClass
*info
= ICS_GET_CLASS(ics
);
572 if (info
->pre_save
) {
577 static int ics_dispatch_post_load(void *opaque
, int version_id
)
579 ICSState
*ics
= opaque
;
580 ICSStateClass
*info
= ICS_GET_CLASS(ics
);
582 if (info
->post_load
) {
583 return info
->post_load(ics
, version_id
);
589 static const VMStateDescription vmstate_ics_irq
= {
592 .minimum_version_id
= 1,
593 .fields
= (VMStateField
[]) {
594 VMSTATE_UINT32(server
, ICSIRQState
),
595 VMSTATE_UINT8(priority
, ICSIRQState
),
596 VMSTATE_UINT8(saved_priority
, ICSIRQState
),
597 VMSTATE_UINT8(status
, ICSIRQState
),
598 VMSTATE_UINT8(flags
, ICSIRQState
),
599 VMSTATE_END_OF_LIST()
603 static const VMStateDescription vmstate_ics
= {
606 .minimum_version_id
= 1,
607 .pre_save
= ics_dispatch_pre_save
,
608 .post_load
= ics_dispatch_post_load
,
609 .fields
= (VMStateField
[]) {
611 VMSTATE_UINT32_EQUAL(nr_irqs
, ICSState
),
613 VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs
, ICSState
, nr_irqs
,
614 vmstate_ics_irq
, ICSIRQState
),
615 VMSTATE_END_OF_LIST()
619 static void ics_initfn(Object
*obj
)
621 ICSState
*ics
= ICS(obj
);
623 ics
->offset
= XICS_IRQ_BASE
;
626 static void ics_realize(DeviceState
*dev
, Error
**errp
)
628 ICSState
*ics
= ICS(dev
);
631 error_setg(errp
, "Number of interrupts needs to be greater 0");
634 ics
->irqs
= g_malloc0(ics
->nr_irqs
* sizeof(ICSIRQState
));
635 ics
->qirqs
= qemu_allocate_irqs(ics_set_irq
, ics
, ics
->nr_irqs
);
638 static void ics_class_init(ObjectClass
*klass
, void *data
)
640 DeviceClass
*dc
= DEVICE_CLASS(klass
);
641 ICSStateClass
*isc
= ICS_CLASS(klass
);
643 dc
->realize
= ics_realize
;
644 dc
->vmsd
= &vmstate_ics
;
645 dc
->reset
= ics_reset
;
646 isc
->post_load
= ics_post_load
;
649 static const TypeInfo ics_info
= {
651 .parent
= TYPE_DEVICE
,
652 .instance_size
= sizeof(ICSState
),
653 .class_init
= ics_class_init
,
654 .class_size
= sizeof(ICSStateClass
),
655 .instance_init
= ics_initfn
,
661 static int xics_find_source(XICSState
*icp
, int irq
)
666 /* FIXME: implement multiple sources */
667 for (src
= 0; src
< sources
; ++src
) {
668 ICSState
*ics
= &icp
->ics
[src
];
669 if (ics_valid_irq(ics
, irq
)) {
677 qemu_irq
xics_get_qirq(XICSState
*icp
, int irq
)
679 int src
= xics_find_source(icp
, irq
);
682 ICSState
*ics
= &icp
->ics
[src
];
683 return ics
->qirqs
[irq
- ics
->offset
];
689 static void ics_set_irq_type(ICSState
*ics
, int srcno
, bool lsi
)
691 assert(!(ics
->irqs
[srcno
].flags
& XICS_FLAGS_IRQ_MASK
));
693 ics
->irqs
[srcno
].flags
|=
694 lsi
? XICS_FLAGS_IRQ_LSI
: XICS_FLAGS_IRQ_MSI
;
697 void xics_set_irq_type(XICSState
*icp
, int irq
, bool lsi
)
699 int src
= xics_find_source(icp
, irq
);
704 ics
= &icp
->ics
[src
];
705 ics_set_irq_type(ics
, irq
- ics
->offset
, lsi
);
/* True iff source @srcno has no type flag set, i.e. is unallocated. */
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
711 static int ics_find_free_block(ICSState
*ics
, int num
, int alignnum
)
715 for (first
= 0; first
< ics
->nr_irqs
; first
+= alignnum
) {
716 if (num
> (ics
->nr_irqs
- first
)) {
719 for (i
= first
; i
< first
+ num
; ++i
) {
720 if (!ICS_IRQ_FREE(ics
, i
)) {
724 if (i
== (first
+ num
)) {
732 int xics_alloc(XICSState
*icp
, int src
, int irq_hint
, bool lsi
, Error
**errp
)
734 ICSState
*ics
= &icp
->ics
[src
];
738 assert(src
== xics_find_source(icp
, irq_hint
));
739 if (!ICS_IRQ_FREE(ics
, irq_hint
- ics
->offset
)) {
740 error_setg(errp
, "can't allocate IRQ %d: already in use", irq_hint
);
745 irq
= ics_find_free_block(ics
, 1, 1);
747 error_setg(errp
, "can't allocate IRQ: no IRQ left");
753 ics_set_irq_type(ics
, irq
- ics
->offset
, lsi
);
754 trace_xics_alloc(src
, irq
);
760 * Allocate block of consecutive IRQs, and return the number of the first IRQ in the block.
761 * If align==true, aligns the first IRQ number to num.
763 int xics_alloc_block(XICSState
*icp
, int src
, int num
, bool lsi
, bool align
,
767 ICSState
*ics
= &icp
->ics
[src
];
771 * MSIMesage::data is used for storing VIRQ so
772 * it has to be aligned to num to support multiple
773 * MSI vectors. MSI-X is not affected by this.
774 * The hint is used for the first IRQ, the rest should
775 * be allocated continuously.
778 assert((num
== 1) || (num
== 2) || (num
== 4) ||
779 (num
== 8) || (num
== 16) || (num
== 32));
780 first
= ics_find_free_block(ics
, num
, num
);
782 first
= ics_find_free_block(ics
, num
, 1);
785 error_setg(errp
, "can't find a free %d-IRQ block", num
);
790 for (i
= first
; i
< first
+ num
; ++i
) {
791 ics_set_irq_type(ics
, i
, lsi
);
794 first
+= ics
->offset
;
796 trace_xics_alloc_block(src
, first
, num
, lsi
, align
);
801 static void ics_free(ICSState
*ics
, int srcno
, int num
)
805 for (i
= srcno
; i
< srcno
+ num
; ++i
) {
806 if (ICS_IRQ_FREE(ics
, i
)) {
807 trace_xics_ics_free_warn(ics
- ics
->icp
->ics
, i
+ ics
->offset
);
809 memset(&ics
->irqs
[i
], 0, sizeof(ICSIRQState
));
813 void xics_free(XICSState
*icp
, int irq
, int num
)
815 int src
= xics_find_source(icp
, irq
);
818 ICSState
*ics
= &icp
->ics
[src
];
820 /* FIXME: implement multiple sources */
823 trace_xics_ics_free(ics
- icp
->ics
, irq
, num
);
824 ics_free(ics
, irq
- ics
->offset
, num
);
832 static target_ulong
h_cppr(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
833 target_ulong opcode
, target_ulong
*args
)
835 CPUState
*cs
= CPU(cpu
);
836 target_ulong cppr
= args
[0];
838 icp_set_cppr(spapr
->icp
, cs
->cpu_index
, cppr
);
842 static target_ulong
h_ipi(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
843 target_ulong opcode
, target_ulong
*args
)
845 target_ulong server
= get_cpu_index_by_dt_id(args
[0]);
846 target_ulong mfrr
= args
[1];
848 if (server
>= spapr
->icp
->nr_servers
) {
852 icp_set_mfrr(spapr
->icp
, server
, mfrr
);
856 static target_ulong
h_xirr(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
857 target_ulong opcode
, target_ulong
*args
)
859 CPUState
*cs
= CPU(cpu
);
860 uint32_t xirr
= icp_accept(spapr
->icp
->ss
+ cs
->cpu_index
);
866 static target_ulong
h_xirr_x(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
867 target_ulong opcode
, target_ulong
*args
)
869 CPUState
*cs
= CPU(cpu
);
870 ICPState
*ss
= &spapr
->icp
->ss
[cs
->cpu_index
];
871 uint32_t xirr
= icp_accept(ss
);
874 args
[1] = cpu_get_host_ticks();
878 static target_ulong
h_eoi(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
879 target_ulong opcode
, target_ulong
*args
)
881 CPUState
*cs
= CPU(cpu
);
882 target_ulong xirr
= args
[0];
884 icp_eoi(spapr
->icp
, cs
->cpu_index
, xirr
);
888 static target_ulong
h_ipoll(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
889 target_ulong opcode
, target_ulong
*args
)
891 CPUState
*cs
= CPU(cpu
);
892 ICPState
*ss
= &spapr
->icp
->ss
[cs
->cpu_index
];
900 static void rtas_set_xive(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
902 uint32_t nargs
, target_ulong args
,
903 uint32_t nret
, target_ulong rets
)
905 ICSState
*ics
= spapr
->icp
->ics
;
906 uint32_t nr
, server
, priority
;
908 if ((nargs
!= 3) || (nret
!= 1)) {
909 rtas_st(rets
, 0, RTAS_OUT_PARAM_ERROR
);
913 nr
= rtas_ld(args
, 0);
914 server
= get_cpu_index_by_dt_id(rtas_ld(args
, 1));
915 priority
= rtas_ld(args
, 2);
917 if (!ics_valid_irq(ics
, nr
) || (server
>= ics
->icp
->nr_servers
)
918 || (priority
> 0xff)) {
919 rtas_st(rets
, 0, RTAS_OUT_PARAM_ERROR
);
923 ics_write_xive(ics
, nr
, server
, priority
, priority
);
925 rtas_st(rets
, 0, RTAS_OUT_SUCCESS
);
928 static void rtas_get_xive(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
930 uint32_t nargs
, target_ulong args
,
931 uint32_t nret
, target_ulong rets
)
933 ICSState
*ics
= spapr
->icp
->ics
;
936 if ((nargs
!= 1) || (nret
!= 3)) {
937 rtas_st(rets
, 0, RTAS_OUT_PARAM_ERROR
);
941 nr
= rtas_ld(args
, 0);
943 if (!ics_valid_irq(ics
, nr
)) {
944 rtas_st(rets
, 0, RTAS_OUT_PARAM_ERROR
);
948 rtas_st(rets
, 0, RTAS_OUT_SUCCESS
);
949 rtas_st(rets
, 1, ics
->irqs
[nr
- ics
->offset
].server
);
950 rtas_st(rets
, 2, ics
->irqs
[nr
- ics
->offset
].priority
);
953 static void rtas_int_off(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
955 uint32_t nargs
, target_ulong args
,
956 uint32_t nret
, target_ulong rets
)
958 ICSState
*ics
= spapr
->icp
->ics
;
961 if ((nargs
!= 1) || (nret
!= 1)) {
962 rtas_st(rets
, 0, RTAS_OUT_PARAM_ERROR
);
966 nr
= rtas_ld(args
, 0);
968 if (!ics_valid_irq(ics
, nr
)) {
969 rtas_st(rets
, 0, RTAS_OUT_PARAM_ERROR
);
973 ics_write_xive(ics
, nr
, ics
->irqs
[nr
- ics
->offset
].server
, 0xff,
974 ics
->irqs
[nr
- ics
->offset
].priority
);
976 rtas_st(rets
, 0, RTAS_OUT_SUCCESS
);
979 static void rtas_int_on(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
981 uint32_t nargs
, target_ulong args
,
982 uint32_t nret
, target_ulong rets
)
984 ICSState
*ics
= spapr
->icp
->ics
;
987 if ((nargs
!= 1) || (nret
!= 1)) {
988 rtas_st(rets
, 0, RTAS_OUT_PARAM_ERROR
);
992 nr
= rtas_ld(args
, 0);
994 if (!ics_valid_irq(ics
, nr
)) {
995 rtas_st(rets
, 0, RTAS_OUT_PARAM_ERROR
);
999 ics_write_xive(ics
, nr
, ics
->irqs
[nr
- ics
->offset
].server
,
1000 ics
->irqs
[nr
- ics
->offset
].saved_priority
,
1001 ics
->irqs
[nr
- ics
->offset
].saved_priority
);
1003 rtas_st(rets
, 0, RTAS_OUT_SUCCESS
);
1010 static void xics_set_nr_irqs(XICSState
*icp
, uint32_t nr_irqs
, Error
**errp
)
1012 icp
->nr_irqs
= icp
->ics
->nr_irqs
= nr_irqs
;
1015 static void xics_set_nr_servers(XICSState
*icp
, uint32_t nr_servers
,
1020 icp
->nr_servers
= nr_servers
;
1022 icp
->ss
= g_malloc0(icp
->nr_servers
*sizeof(ICPState
));
1023 for (i
= 0; i
< icp
->nr_servers
; i
++) {
1025 object_initialize(&icp
->ss
[i
], sizeof(icp
->ss
[i
]), TYPE_ICP
);
1026 snprintf(buffer
, sizeof(buffer
), "icp[%d]", i
);
1027 object_property_add_child(OBJECT(icp
), buffer
, OBJECT(&icp
->ss
[i
]),
1032 static void xics_realize(DeviceState
*dev
, Error
**errp
)
1034 XICSState
*icp
= XICS(dev
);
1035 Error
*error
= NULL
;
1038 if (!icp
->nr_servers
) {
1039 error_setg(errp
, "Number of servers needs to be greater 0");
1043 /* Registration of global state belongs into realize */
1044 spapr_rtas_register(RTAS_IBM_SET_XIVE
, "ibm,set-xive", rtas_set_xive
);
1045 spapr_rtas_register(RTAS_IBM_GET_XIVE
, "ibm,get-xive", rtas_get_xive
);
1046 spapr_rtas_register(RTAS_IBM_INT_OFF
, "ibm,int-off", rtas_int_off
);
1047 spapr_rtas_register(RTAS_IBM_INT_ON
, "ibm,int-on", rtas_int_on
);
1049 spapr_register_hypercall(H_CPPR
, h_cppr
);
1050 spapr_register_hypercall(H_IPI
, h_ipi
);
1051 spapr_register_hypercall(H_XIRR
, h_xirr
);
1052 spapr_register_hypercall(H_XIRR_X
, h_xirr_x
);
1053 spapr_register_hypercall(H_EOI
, h_eoi
);
1054 spapr_register_hypercall(H_IPOLL
, h_ipoll
);
1056 object_property_set_bool(OBJECT(icp
->ics
), true, "realized", &error
);
1058 error_propagate(errp
, error
);
1062 for (i
= 0; i
< icp
->nr_servers
; i
++) {
1063 object_property_set_bool(OBJECT(&icp
->ss
[i
]), true, "realized", &error
);
1065 error_propagate(errp
, error
);
1071 static void xics_initfn(Object
*obj
)
1073 XICSState
*xics
= XICS(obj
);
1075 xics
->ics
= ICS(object_new(TYPE_ICS
));
1076 object_property_add_child(obj
, "ics", OBJECT(xics
->ics
), NULL
);
1077 xics
->ics
->icp
= xics
;
1080 static void xics_class_init(ObjectClass
*oc
, void *data
)
1082 DeviceClass
*dc
= DEVICE_CLASS(oc
);
1083 XICSStateClass
*xsc
= XICS_CLASS(oc
);
1085 dc
->realize
= xics_realize
;
1086 xsc
->set_nr_irqs
= xics_set_nr_irqs
;
1087 xsc
->set_nr_servers
= xics_set_nr_servers
;
1090 static const TypeInfo xics_info
= {
1092 .parent
= TYPE_XICS_COMMON
,
1093 .instance_size
= sizeof(XICSState
),
1094 .class_size
= sizeof(XICSStateClass
),
1095 .class_init
= xics_class_init
,
1096 .instance_init
= xics_initfn
,
1099 static void xics_register_types(void)
1101 type_register_static(&xics_common_info
);
1102 type_register_static(&xics_info
);
1103 type_register_static(&ics_info
);
1104 type_register_static(&icp_info
);
1107 type_init(xics_register_types
)