/*
 * hw/intc/xics.c (blob cce7f3d11264cda268de3c4c0af3f5342d0fea60)
 * (extraction residue: the unrelated title "hw/intc/arm_gicv3: Implement
 * CPU i/f SGI generation registers" came from the hosting page, not this file)
 */
/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"
40 static int get_cpu_index_by_dt_id(int cpu_dt_id)
42 PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);
44 if (cpu) {
45 return cpu->parent_obj.cpu_index;
48 return -1;
51 void xics_cpu_destroy(XICSState *icp, PowerPCCPU *cpu)
53 CPUState *cs = CPU(cpu);
54 ICPState *ss = &icp->ss[cs->cpu_index];
56 assert(cs->cpu_index < icp->nr_servers);
57 assert(cs == ss->cs);
59 ss->output = NULL;
60 ss->cs = NULL;
63 void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
65 CPUState *cs = CPU(cpu);
66 CPUPPCState *env = &cpu->env;
67 ICPState *ss = &icp->ss[cs->cpu_index];
68 XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
70 assert(cs->cpu_index < icp->nr_servers);
72 ss->cs = cs;
74 if (info->cpu_setup) {
75 info->cpu_setup(icp, cpu);
78 switch (PPC_INPUT(env)) {
79 case PPC_FLAGS_INPUT_POWER7:
80 ss->output = env->irq_inputs[POWER7_INPUT_INT];
81 break;
83 case PPC_FLAGS_INPUT_970:
84 ss->output = env->irq_inputs[PPC970_INPUT_INT];
85 break;
87 default:
88 error_report("XICS interrupt controller does not support this CPU "
89 "bus model");
90 abort();
95 * XICS Common class - parent for emulated XICS and KVM-XICS
97 static void xics_common_reset(DeviceState *d)
99 XICSState *icp = XICS_COMMON(d);
100 int i;
102 for (i = 0; i < icp->nr_servers; i++) {
103 device_reset(DEVICE(&icp->ss[i]));
106 device_reset(DEVICE(icp->ics));
109 static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
110 void *opaque, Error **errp)
112 XICSState *icp = XICS_COMMON(obj);
113 int64_t value = icp->nr_irqs;
115 visit_type_int(v, name, &value, errp);
118 static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
119 void *opaque, Error **errp)
121 XICSState *icp = XICS_COMMON(obj);
122 XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
123 Error *error = NULL;
124 int64_t value;
126 visit_type_int(v, name, &value, &error);
127 if (error) {
128 error_propagate(errp, error);
129 return;
131 if (icp->nr_irqs) {
132 error_setg(errp, "Number of interrupts is already set to %u",
133 icp->nr_irqs);
134 return;
137 assert(info->set_nr_irqs);
138 assert(icp->ics);
139 info->set_nr_irqs(icp, value, errp);
142 static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
143 const char *name, void *opaque,
144 Error **errp)
146 XICSState *icp = XICS_COMMON(obj);
147 int64_t value = icp->nr_servers;
149 visit_type_int(v, name, &value, errp);
152 static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
153 const char *name, void *opaque,
154 Error **errp)
156 XICSState *icp = XICS_COMMON(obj);
157 XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
158 Error *error = NULL;
159 int64_t value;
161 visit_type_int(v, name, &value, &error);
162 if (error) {
163 error_propagate(errp, error);
164 return;
166 if (icp->nr_servers) {
167 error_setg(errp, "Number of servers is already set to %u",
168 icp->nr_servers);
169 return;
172 assert(info->set_nr_servers);
173 info->set_nr_servers(icp, value, errp);
176 static void xics_common_initfn(Object *obj)
178 object_property_add(obj, "nr_irqs", "int",
179 xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
180 NULL, NULL, NULL);
181 object_property_add(obj, "nr_servers", "int",
182 xics_prop_get_nr_servers, xics_prop_set_nr_servers,
183 NULL, NULL, NULL);
186 static void xics_common_class_init(ObjectClass *oc, void *data)
188 DeviceClass *dc = DEVICE_CLASS(oc);
190 dc->reset = xics_common_reset;
193 static const TypeInfo xics_common_info = {
194 .name = TYPE_XICS_COMMON,
195 .parent = TYPE_SYS_BUS_DEVICE,
196 .instance_size = sizeof(XICSState),
197 .class_size = sizeof(XICSStateClass),
198 .instance_init = xics_common_initfn,
199 .class_init = xics_common_class_init,
203 * ICP: Presentation layer
206 #define XISR_MASK 0x00ffffff
207 #define CPPR_MASK 0xff000000
209 #define XISR(ss) (((ss)->xirr) & XISR_MASK)
210 #define CPPR(ss) (((ss)->xirr) >> 24)
212 static void ics_reject(ICSState *ics, int nr);
213 static void ics_resend(ICSState *ics);
214 static void ics_eoi(ICSState *ics, int nr);
216 static void icp_check_ipi(XICSState *icp, int server)
218 ICPState *ss = icp->ss + server;
220 if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
221 return;
224 trace_xics_icp_check_ipi(server, ss->mfrr);
226 if (XISR(ss)) {
227 ics_reject(icp->ics, XISR(ss));
230 ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
231 ss->pending_priority = ss->mfrr;
232 qemu_irq_raise(ss->output);
235 static void icp_resend(XICSState *icp, int server)
237 ICPState *ss = icp->ss + server;
239 if (ss->mfrr < CPPR(ss)) {
240 icp_check_ipi(icp, server);
242 ics_resend(icp->ics);
245 static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
247 ICPState *ss = icp->ss + server;
248 uint8_t old_cppr;
249 uint32_t old_xisr;
251 old_cppr = CPPR(ss);
252 ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);
254 if (cppr < old_cppr) {
255 if (XISR(ss) && (cppr <= ss->pending_priority)) {
256 old_xisr = XISR(ss);
257 ss->xirr &= ~XISR_MASK; /* Clear XISR */
258 ss->pending_priority = 0xff;
259 qemu_irq_lower(ss->output);
260 ics_reject(icp->ics, old_xisr);
262 } else {
263 if (!XISR(ss)) {
264 icp_resend(icp, server);
269 static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
271 ICPState *ss = icp->ss + server;
273 ss->mfrr = mfrr;
274 if (mfrr < CPPR(ss)) {
275 icp_check_ipi(icp, server);
279 static uint32_t icp_accept(ICPState *ss)
281 uint32_t xirr = ss->xirr;
283 qemu_irq_lower(ss->output);
284 ss->xirr = ss->pending_priority << 24;
285 ss->pending_priority = 0xff;
287 trace_xics_icp_accept(xirr, ss->xirr);
289 return xirr;
292 static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
294 ICPState *ss = icp->ss + server;
296 /* Send EOI -> ICS */
297 ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
298 trace_xics_icp_eoi(server, xirr, ss->xirr);
299 ics_eoi(icp->ics, xirr & XISR_MASK);
300 if (!XISR(ss)) {
301 icp_resend(icp, server);
305 static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
307 ICPState *ss = icp->ss + server;
309 trace_xics_icp_irq(server, nr, priority);
311 if ((priority >= CPPR(ss))
312 || (XISR(ss) && (ss->pending_priority <= priority))) {
313 ics_reject(icp->ics, nr);
314 } else {
315 if (XISR(ss)) {
316 ics_reject(icp->ics, XISR(ss));
318 ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
319 ss->pending_priority = priority;
320 trace_xics_icp_raise(ss->xirr, ss->pending_priority);
321 qemu_irq_raise(ss->output);
325 static void icp_dispatch_pre_save(void *opaque)
327 ICPState *ss = opaque;
328 ICPStateClass *info = ICP_GET_CLASS(ss);
330 if (info->pre_save) {
331 info->pre_save(ss);
335 static int icp_dispatch_post_load(void *opaque, int version_id)
337 ICPState *ss = opaque;
338 ICPStateClass *info = ICP_GET_CLASS(ss);
340 if (info->post_load) {
341 return info->post_load(ss, version_id);
344 return 0;
347 static const VMStateDescription vmstate_icp_server = {
348 .name = "icp/server",
349 .version_id = 1,
350 .minimum_version_id = 1,
351 .pre_save = icp_dispatch_pre_save,
352 .post_load = icp_dispatch_post_load,
353 .fields = (VMStateField[]) {
354 /* Sanity check */
355 VMSTATE_UINT32(xirr, ICPState),
356 VMSTATE_UINT8(pending_priority, ICPState),
357 VMSTATE_UINT8(mfrr, ICPState),
358 VMSTATE_END_OF_LIST()
362 static void icp_reset(DeviceState *dev)
364 ICPState *icp = ICP(dev);
366 icp->xirr = 0;
367 icp->pending_priority = 0xff;
368 icp->mfrr = 0xff;
370 /* Make all outputs are deasserted */
371 qemu_set_irq(icp->output, 0);
374 static void icp_class_init(ObjectClass *klass, void *data)
376 DeviceClass *dc = DEVICE_CLASS(klass);
378 dc->reset = icp_reset;
379 dc->vmsd = &vmstate_icp_server;
382 static const TypeInfo icp_info = {
383 .name = TYPE_ICP,
384 .parent = TYPE_DEVICE,
385 .instance_size = sizeof(ICPState),
386 .class_init = icp_class_init,
387 .class_size = sizeof(ICPStateClass),
391 * ICS: Source layer
393 static int ics_valid_irq(ICSState *ics, uint32_t nr)
395 return (nr >= ics->offset)
396 && (nr < (ics->offset + ics->nr_irqs));
399 static void resend_msi(ICSState *ics, int srcno)
401 ICSIRQState *irq = ics->irqs + srcno;
403 /* FIXME: filter by server#? */
404 if (irq->status & XICS_STATUS_REJECTED) {
405 irq->status &= ~XICS_STATUS_REJECTED;
406 if (irq->priority != 0xff) {
407 icp_irq(ics->icp, irq->server, srcno + ics->offset,
408 irq->priority);
413 static void resend_lsi(ICSState *ics, int srcno)
415 ICSIRQState *irq = ics->irqs + srcno;
417 if ((irq->priority != 0xff)
418 && (irq->status & XICS_STATUS_ASSERTED)
419 && !(irq->status & XICS_STATUS_SENT)) {
420 irq->status |= XICS_STATUS_SENT;
421 icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
425 static void set_irq_msi(ICSState *ics, int srcno, int val)
427 ICSIRQState *irq = ics->irqs + srcno;
429 trace_xics_set_irq_msi(srcno, srcno + ics->offset);
431 if (val) {
432 if (irq->priority == 0xff) {
433 irq->status |= XICS_STATUS_MASKED_PENDING;
434 trace_xics_masked_pending();
435 } else {
436 icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
441 static void set_irq_lsi(ICSState *ics, int srcno, int val)
443 ICSIRQState *irq = ics->irqs + srcno;
445 trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
446 if (val) {
447 irq->status |= XICS_STATUS_ASSERTED;
448 } else {
449 irq->status &= ~XICS_STATUS_ASSERTED;
451 resend_lsi(ics, srcno);
454 static void ics_set_irq(void *opaque, int srcno, int val)
456 ICSState *ics = (ICSState *)opaque;
458 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
459 set_irq_lsi(ics, srcno, val);
460 } else {
461 set_irq_msi(ics, srcno, val);
465 static void write_xive_msi(ICSState *ics, int srcno)
467 ICSIRQState *irq = ics->irqs + srcno;
469 if (!(irq->status & XICS_STATUS_MASKED_PENDING)
470 || (irq->priority == 0xff)) {
471 return;
474 irq->status &= ~XICS_STATUS_MASKED_PENDING;
475 icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
478 static void write_xive_lsi(ICSState *ics, int srcno)
480 resend_lsi(ics, srcno);
483 static void ics_write_xive(ICSState *ics, int nr, int server,
484 uint8_t priority, uint8_t saved_priority)
486 int srcno = nr - ics->offset;
487 ICSIRQState *irq = ics->irqs + srcno;
489 irq->server = server;
490 irq->priority = priority;
491 irq->saved_priority = saved_priority;
493 trace_xics_ics_write_xive(nr, srcno, server, priority);
495 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
496 write_xive_lsi(ics, srcno);
497 } else {
498 write_xive_msi(ics, srcno);
502 static void ics_reject(ICSState *ics, int nr)
504 ICSIRQState *irq = ics->irqs + nr - ics->offset;
506 trace_xics_ics_reject(nr, nr - ics->offset);
507 irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
508 irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
511 static void ics_resend(ICSState *ics)
513 int i;
515 for (i = 0; i < ics->nr_irqs; i++) {
516 /* FIXME: filter by server#? */
517 if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
518 resend_lsi(ics, i);
519 } else {
520 resend_msi(ics, i);
525 static void ics_eoi(ICSState *ics, int nr)
527 int srcno = nr - ics->offset;
528 ICSIRQState *irq = ics->irqs + srcno;
530 trace_xics_ics_eoi(nr);
532 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
533 irq->status &= ~XICS_STATUS_SENT;
537 static void ics_reset(DeviceState *dev)
539 ICSState *ics = ICS(dev);
540 int i;
541 uint8_t flags[ics->nr_irqs];
543 for (i = 0; i < ics->nr_irqs; i++) {
544 flags[i] = ics->irqs[i].flags;
547 memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);
549 for (i = 0; i < ics->nr_irqs; i++) {
550 ics->irqs[i].priority = 0xff;
551 ics->irqs[i].saved_priority = 0xff;
552 ics->irqs[i].flags = flags[i];
556 static int ics_post_load(ICSState *ics, int version_id)
558 int i;
560 for (i = 0; i < ics->icp->nr_servers; i++) {
561 icp_resend(ics->icp, i);
564 return 0;
567 static void ics_dispatch_pre_save(void *opaque)
569 ICSState *ics = opaque;
570 ICSStateClass *info = ICS_GET_CLASS(ics);
572 if (info->pre_save) {
573 info->pre_save(ics);
577 static int ics_dispatch_post_load(void *opaque, int version_id)
579 ICSState *ics = opaque;
580 ICSStateClass *info = ICS_GET_CLASS(ics);
582 if (info->post_load) {
583 return info->post_load(ics, version_id);
586 return 0;
589 static const VMStateDescription vmstate_ics_irq = {
590 .name = "ics/irq",
591 .version_id = 2,
592 .minimum_version_id = 1,
593 .fields = (VMStateField[]) {
594 VMSTATE_UINT32(server, ICSIRQState),
595 VMSTATE_UINT8(priority, ICSIRQState),
596 VMSTATE_UINT8(saved_priority, ICSIRQState),
597 VMSTATE_UINT8(status, ICSIRQState),
598 VMSTATE_UINT8(flags, ICSIRQState),
599 VMSTATE_END_OF_LIST()
603 static const VMStateDescription vmstate_ics = {
604 .name = "ics",
605 .version_id = 1,
606 .minimum_version_id = 1,
607 .pre_save = ics_dispatch_pre_save,
608 .post_load = ics_dispatch_post_load,
609 .fields = (VMStateField[]) {
610 /* Sanity check */
611 VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),
613 VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
614 vmstate_ics_irq, ICSIRQState),
615 VMSTATE_END_OF_LIST()
619 static void ics_initfn(Object *obj)
621 ICSState *ics = ICS(obj);
623 ics->offset = XICS_IRQ_BASE;
626 static void ics_realize(DeviceState *dev, Error **errp)
628 ICSState *ics = ICS(dev);
630 if (!ics->nr_irqs) {
631 error_setg(errp, "Number of interrupts needs to be greater 0");
632 return;
634 ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
635 ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
638 static void ics_class_init(ObjectClass *klass, void *data)
640 DeviceClass *dc = DEVICE_CLASS(klass);
641 ICSStateClass *isc = ICS_CLASS(klass);
643 dc->realize = ics_realize;
644 dc->vmsd = &vmstate_ics;
645 dc->reset = ics_reset;
646 isc->post_load = ics_post_load;
649 static const TypeInfo ics_info = {
650 .name = TYPE_ICS,
651 .parent = TYPE_DEVICE,
652 .instance_size = sizeof(ICSState),
653 .class_init = ics_class_init,
654 .class_size = sizeof(ICSStateClass),
655 .instance_init = ics_initfn,
659 * Exported functions
661 static int xics_find_source(XICSState *icp, int irq)
663 int sources = 1;
664 int src;
666 /* FIXME: implement multiple sources */
667 for (src = 0; src < sources; ++src) {
668 ICSState *ics = &icp->ics[src];
669 if (ics_valid_irq(ics, irq)) {
670 return src;
674 return -1;
677 qemu_irq xics_get_qirq(XICSState *icp, int irq)
679 int src = xics_find_source(icp, irq);
681 if (src >= 0) {
682 ICSState *ics = &icp->ics[src];
683 return ics->qirqs[irq - ics->offset];
686 return NULL;
689 static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
691 assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));
693 ics->irqs[srcno].flags |=
694 lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
697 void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
699 int src = xics_find_source(icp, irq);
700 ICSState *ics;
702 assert(src >= 0);
704 ics = &icp->ics[src];
705 ics_set_irq_type(ics, irq - ics->offset, lsi);
/* A source is free iff no trigger type (LSI/MSI) has been assigned. */
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
711 static int ics_find_free_block(ICSState *ics, int num, int alignnum)
713 int first, i;
715 for (first = 0; first < ics->nr_irqs; first += alignnum) {
716 if (num > (ics->nr_irqs - first)) {
717 return -1;
719 for (i = first; i < first + num; ++i) {
720 if (!ICS_IRQ_FREE(ics, i)) {
721 break;
724 if (i == (first + num)) {
725 return first;
729 return -1;
732 int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp)
734 ICSState *ics = &icp->ics[src];
735 int irq;
737 if (irq_hint) {
738 assert(src == xics_find_source(icp, irq_hint));
739 if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
740 error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
741 return -1;
743 irq = irq_hint;
744 } else {
745 irq = ics_find_free_block(ics, 1, 1);
746 if (irq < 0) {
747 error_setg(errp, "can't allocate IRQ: no IRQ left");
748 return -1;
750 irq += ics->offset;
753 ics_set_irq_type(ics, irq - ics->offset, lsi);
754 trace_xics_alloc(src, irq);
756 return irq;
760 * Allocate block of consecutive IRQs, and return the number of the first IRQ in the block.
761 * If align==true, aligns the first IRQ number to num.
763 int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align,
764 Error **errp)
766 int i, first = -1;
767 ICSState *ics = &icp->ics[src];
769 assert(src == 0);
771 * MSIMesage::data is used for storing VIRQ so
772 * it has to be aligned to num to support multiple
773 * MSI vectors. MSI-X is not affected by this.
774 * The hint is used for the first IRQ, the rest should
775 * be allocated continuously.
777 if (align) {
778 assert((num == 1) || (num == 2) || (num == 4) ||
779 (num == 8) || (num == 16) || (num == 32));
780 first = ics_find_free_block(ics, num, num);
781 } else {
782 first = ics_find_free_block(ics, num, 1);
784 if (first < 0) {
785 error_setg(errp, "can't find a free %d-IRQ block", num);
786 return -1;
789 if (first >= 0) {
790 for (i = first; i < first + num; ++i) {
791 ics_set_irq_type(ics, i, lsi);
794 first += ics->offset;
796 trace_xics_alloc_block(src, first, num, lsi, align);
798 return first;
801 static void ics_free(ICSState *ics, int srcno, int num)
803 int i;
805 for (i = srcno; i < srcno + num; ++i) {
806 if (ICS_IRQ_FREE(ics, i)) {
807 trace_xics_ics_free_warn(ics - ics->icp->ics, i + ics->offset);
809 memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
813 void xics_free(XICSState *icp, int irq, int num)
815 int src = xics_find_source(icp, irq);
817 if (src >= 0) {
818 ICSState *ics = &icp->ics[src];
820 /* FIXME: implement multiple sources */
821 assert(src == 0);
823 trace_xics_ics_free(ics - icp->ics, irq, num);
824 ics_free(ics, irq - ics->offset, num);
829 * Guest interfaces
832 static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
833 target_ulong opcode, target_ulong *args)
835 CPUState *cs = CPU(cpu);
836 target_ulong cppr = args[0];
838 icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
839 return H_SUCCESS;
842 static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
843 target_ulong opcode, target_ulong *args)
845 target_ulong server = get_cpu_index_by_dt_id(args[0]);
846 target_ulong mfrr = args[1];
848 if (server >= spapr->icp->nr_servers) {
849 return H_PARAMETER;
852 icp_set_mfrr(spapr->icp, server, mfrr);
853 return H_SUCCESS;
856 static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
857 target_ulong opcode, target_ulong *args)
859 CPUState *cs = CPU(cpu);
860 uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);
862 args[0] = xirr;
863 return H_SUCCESS;
866 static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
867 target_ulong opcode, target_ulong *args)
869 CPUState *cs = CPU(cpu);
870 ICPState *ss = &spapr->icp->ss[cs->cpu_index];
871 uint32_t xirr = icp_accept(ss);
873 args[0] = xirr;
874 args[1] = cpu_get_host_ticks();
875 return H_SUCCESS;
878 static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
879 target_ulong opcode, target_ulong *args)
881 CPUState *cs = CPU(cpu);
882 target_ulong xirr = args[0];
884 icp_eoi(spapr->icp, cs->cpu_index, xirr);
885 return H_SUCCESS;
888 static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
889 target_ulong opcode, target_ulong *args)
891 CPUState *cs = CPU(cpu);
892 ICPState *ss = &spapr->icp->ss[cs->cpu_index];
894 args[0] = ss->xirr;
895 args[1] = ss->mfrr;
897 return H_SUCCESS;
900 static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
901 uint32_t token,
902 uint32_t nargs, target_ulong args,
903 uint32_t nret, target_ulong rets)
905 ICSState *ics = spapr->icp->ics;
906 uint32_t nr, server, priority;
908 if ((nargs != 3) || (nret != 1)) {
909 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
910 return;
913 nr = rtas_ld(args, 0);
914 server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
915 priority = rtas_ld(args, 2);
917 if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
918 || (priority > 0xff)) {
919 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
920 return;
923 ics_write_xive(ics, nr, server, priority, priority);
925 rtas_st(rets, 0, RTAS_OUT_SUCCESS);
928 static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
929 uint32_t token,
930 uint32_t nargs, target_ulong args,
931 uint32_t nret, target_ulong rets)
933 ICSState *ics = spapr->icp->ics;
934 uint32_t nr;
936 if ((nargs != 1) || (nret != 3)) {
937 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
938 return;
941 nr = rtas_ld(args, 0);
943 if (!ics_valid_irq(ics, nr)) {
944 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
945 return;
948 rtas_st(rets, 0, RTAS_OUT_SUCCESS);
949 rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
950 rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
953 static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
954 uint32_t token,
955 uint32_t nargs, target_ulong args,
956 uint32_t nret, target_ulong rets)
958 ICSState *ics = spapr->icp->ics;
959 uint32_t nr;
961 if ((nargs != 1) || (nret != 1)) {
962 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
963 return;
966 nr = rtas_ld(args, 0);
968 if (!ics_valid_irq(ics, nr)) {
969 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
970 return;
973 ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
974 ics->irqs[nr - ics->offset].priority);
976 rtas_st(rets, 0, RTAS_OUT_SUCCESS);
979 static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
980 uint32_t token,
981 uint32_t nargs, target_ulong args,
982 uint32_t nret, target_ulong rets)
984 ICSState *ics = spapr->icp->ics;
985 uint32_t nr;
987 if ((nargs != 1) || (nret != 1)) {
988 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
989 return;
992 nr = rtas_ld(args, 0);
994 if (!ics_valid_irq(ics, nr)) {
995 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
996 return;
999 ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
1000 ics->irqs[nr - ics->offset].saved_priority,
1001 ics->irqs[nr - ics->offset].saved_priority);
1003 rtas_st(rets, 0, RTAS_OUT_SUCCESS);
1007 * XICS
1010 static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp)
1012 icp->nr_irqs = icp->ics->nr_irqs = nr_irqs;
1015 static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers,
1016 Error **errp)
1018 int i;
1020 icp->nr_servers = nr_servers;
1022 icp->ss = g_malloc0(icp->nr_servers*sizeof(ICPState));
1023 for (i = 0; i < icp->nr_servers; i++) {
1024 char buffer[32];
1025 object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP);
1026 snprintf(buffer, sizeof(buffer), "icp[%d]", i);
1027 object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
1028 errp);
1032 static void xics_realize(DeviceState *dev, Error **errp)
1034 XICSState *icp = XICS(dev);
1035 Error *error = NULL;
1036 int i;
1038 if (!icp->nr_servers) {
1039 error_setg(errp, "Number of servers needs to be greater 0");
1040 return;
1043 /* Registration of global state belongs into realize */
1044 spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
1045 spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
1046 spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
1047 spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);
1049 spapr_register_hypercall(H_CPPR, h_cppr);
1050 spapr_register_hypercall(H_IPI, h_ipi);
1051 spapr_register_hypercall(H_XIRR, h_xirr);
1052 spapr_register_hypercall(H_XIRR_X, h_xirr_x);
1053 spapr_register_hypercall(H_EOI, h_eoi);
1054 spapr_register_hypercall(H_IPOLL, h_ipoll);
1056 object_property_set_bool(OBJECT(icp->ics), true, "realized", &error);
1057 if (error) {
1058 error_propagate(errp, error);
1059 return;
1062 for (i = 0; i < icp->nr_servers; i++) {
1063 object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error);
1064 if (error) {
1065 error_propagate(errp, error);
1066 return;
1071 static void xics_initfn(Object *obj)
1073 XICSState *xics = XICS(obj);
1075 xics->ics = ICS(object_new(TYPE_ICS));
1076 object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
1077 xics->ics->icp = xics;
1080 static void xics_class_init(ObjectClass *oc, void *data)
1082 DeviceClass *dc = DEVICE_CLASS(oc);
1083 XICSStateClass *xsc = XICS_CLASS(oc);
1085 dc->realize = xics_realize;
1086 xsc->set_nr_irqs = xics_set_nr_irqs;
1087 xsc->set_nr_servers = xics_set_nr_servers;
1090 static const TypeInfo xics_info = {
1091 .name = TYPE_XICS,
1092 .parent = TYPE_XICS_COMMON,
1093 .instance_size = sizeof(XICSState),
1094 .class_size = sizeof(XICSStateClass),
1095 .class_init = xics_class_init,
1096 .instance_init = xics_initfn,
1099 static void xics_register_types(void)
1101 type_register_static(&xics_common_info);
1102 type_register_static(&xics_info);
1103 type_register_static(&ics_info);
1104 type_register_static(&icp_info);
1107 type_init(xics_register_types)