include/qemu/osdep.h: Don't include qapi/error.h
[qemu/ar7.git] / hw / intc / xics.c
blob04a079cd550995ad8b73c94c968faa4b23801edf
1 /*
2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
4 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
6 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
28 #include "qemu/osdep.h"
29 #include "qapi/error.h"
30 #include "hw/hw.h"
31 #include "trace.h"
32 #include "qemu/timer.h"
33 #include "hw/ppc/spapr.h"
34 #include "hw/ppc/xics.h"
35 #include "qemu/error-report.h"
36 #include "qapi/visitor.h"
38 static int get_cpu_index_by_dt_id(int cpu_dt_id)
40 PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);
42 if (cpu) {
43 return cpu->parent_obj.cpu_index;
46 return -1;
/*
 * Wire a vCPU to its per-server ICP state: run the backend's cpu_setup
 * hook (if any), then latch the CPU's external-interrupt input line so
 * the ICP can raise/lower it.
 */
void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);

    assert(cs->cpu_index < icp->nr_servers);

    if (info->cpu_setup) {
        info->cpu_setup(icp, cpu);
    }

    /* Pick the interrupt input line XICS drives; only POWER7- and
     * 970-style interrupt inputs are supported. */
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        error_report("XICS interrupt controller does not support this CPU "
                     "bus model");
        abort();
    }
}
79 * XICS Common class - parent for emulated XICS and KVM-XICS
81 static void xics_common_reset(DeviceState *d)
83 XICSState *icp = XICS_COMMON(d);
84 int i;
86 for (i = 0; i < icp->nr_servers; i++) {
87 device_reset(DEVICE(&icp->ss[i]));
90 device_reset(DEVICE(icp->ics));
93 static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
94 void *opaque, Error **errp)
96 XICSState *icp = XICS_COMMON(obj);
97 int64_t value = icp->nr_irqs;
99 visit_type_int(v, name, &value, errp);
102 static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
103 void *opaque, Error **errp)
105 XICSState *icp = XICS_COMMON(obj);
106 XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
107 Error *error = NULL;
108 int64_t value;
110 visit_type_int(v, name, &value, &error);
111 if (error) {
112 error_propagate(errp, error);
113 return;
115 if (icp->nr_irqs) {
116 error_setg(errp, "Number of interrupts is already set to %u",
117 icp->nr_irqs);
118 return;
121 assert(info->set_nr_irqs);
122 assert(icp->ics);
123 info->set_nr_irqs(icp, value, errp);
126 static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
127 const char *name, void *opaque,
128 Error **errp)
130 XICSState *icp = XICS_COMMON(obj);
131 int64_t value = icp->nr_servers;
133 visit_type_int(v, name, &value, errp);
136 static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
137 const char *name, void *opaque,
138 Error **errp)
140 XICSState *icp = XICS_COMMON(obj);
141 XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
142 Error *error = NULL;
143 int64_t value;
145 visit_type_int(v, name, &value, &error);
146 if (error) {
147 error_propagate(errp, error);
148 return;
150 if (icp->nr_servers) {
151 error_setg(errp, "Number of servers is already set to %u",
152 icp->nr_servers);
153 return;
156 assert(info->set_nr_servers);
157 info->set_nr_servers(icp, value, errp);
/* Instance init: register the write-once "nr_irqs" and "nr_servers"
 * QOM properties (no release hook, no opaque). */
static void xics_common_initfn(Object *obj)
{
    object_property_add(obj, "nr_irqs", "int",
                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
                        NULL, NULL, NULL);
    object_property_add(obj, "nr_servers", "int",
                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
                        NULL, NULL, NULL);
}
/* Class init for the common base: only the reset handler is shared. */
static void xics_common_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = xics_common_reset;
}
/* Base QOM type shared by the emulated XICS and KVM-XICS variants. */
static const TypeInfo xics_common_info = {
    .name          = TYPE_XICS_COMMON,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_size    = sizeof(XICSStateClass),
    .instance_init = xics_common_initfn,
    .class_init    = xics_common_class_init,
};
/*
 * ICP: Presentation layer
 */

#define XISR_MASK  0x00ffffff   /* low 24 bits of XIRR: pending source */
#define CPPR_MASK  0xff000000   /* top byte of XIRR: current priority */

#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
#define CPPR(ss)   (((ss)->xirr) >> 24)

/* Forward declarations into the source (ICS) layer */
static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);
/*
 * Try to deliver the IPI (source XICS_IPI) to @server at the priority
 * held in MFRR.  If the already-pending interrupt is at least as
 * favoured (numerically lower or equal priority), leave it alone;
 * otherwise reject it back to the ICS and present the IPI instead.
 */
static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        /* Current pending interrupt wins: keep it. */
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    if (XISR(ss)) {
        /* Displace the less favoured pending interrupt. */
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}
/*
 * Ask for redelivery of anything that may now be acceptable by
 * @server: a pending IPI (MFRR more favoured than CPPR) and any
 * previously rejected ICS sources.
 */
static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}
/*
 * H_CPPR backend: update the Current Processor Priority Register.
 * Moving to a more favoured CPPR may force the pending interrupt back
 * to the source layer; moving to a less favoured one may allow a
 * previously rejected interrupt back in via resend.
 */
static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        /* CPPR became more favoured: the pending interrupt may no
         * longer qualify and must be rejected back to the ICS.  Note
         * the XISR is cleared and the line lowered *before* the reject,
         * so a re-presentation from ics_reject sees a clean ICP. */
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        /* CPPR became less favoured: retry rejected interrupts. */
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}
253 static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
255 ICPState *ss = icp->ss + server;
257 ss->mfrr = mfrr;
258 if (mfrr < CPPR(ss)) {
259 icp_check_ipi(icp, server);
/*
 * H_XIRR backend: the guest accepts the pending interrupt.  Returns
 * the old XIRR value; afterwards XISR is clear, CPPR holds the
 * accepted interrupt's priority, and the output line is dropped.
 */
static uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    /* CPPR is raised to the priority of the interrupt being taken */
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}
/*
 * H_EOI backend: end-of-interrupt.  Restores CPPR from the XIRR value
 * written by the guest, forwards the EOI to the source layer, then
 * resends anything that became deliverable at the restored priority.
 */
static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}
/*
 * Present interrupt @nr at @priority to @server.  The interrupt is
 * rejected back to the ICS when it does not beat the CPPR, or when an
 * at-least-as-favoured interrupt is already pending; otherwise any
 * currently pending interrupt is rejected and @nr takes its place.
 */
static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            /* Displace the less favoured pending interrupt. */
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}
309 static void icp_dispatch_pre_save(void *opaque)
311 ICPState *ss = opaque;
312 ICPStateClass *info = ICP_GET_CLASS(ss);
314 if (info->pre_save) {
315 info->pre_save(ss);
319 static int icp_dispatch_post_load(void *opaque, int version_id)
321 ICPState *ss = opaque;
322 ICPStateClass *info = ICP_GET_CLASS(ss);
324 if (info->post_load) {
325 return info->post_load(ss, version_id);
328 return 0;
/* Migration state of one ICP (per-CPU presentation controller). */
static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_dispatch_pre_save,
    .post_load = icp_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};
/* Reset one ICP: nothing pending, priority and MFRR fully open. */
static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);
}
/* Class init for the ICP device: wire reset and migration state. */
static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}
/* QOM type for the per-CPU interrupt presentation controller. */
static const TypeInfo icp_info = {
    .name          = TYPE_ICP,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init    = icp_class_init,
    .class_size    = sizeof(ICPStateClass),
};
375 * ICS: Source layer
377 static int ics_valid_irq(ICSState *ics, uint32_t nr)
379 return (nr >= ics->offset)
380 && (nr < (ics->offset + ics->nr_irqs));
/*
 * Retry delivery of a previously rejected MSI.  The REJECTED flag is
 * cleared first so a further rejection can be recorded by ics_reject.
 */
static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}
/*
 * (Re)deliver a level-triggered interrupt: presented only while it is
 * unmasked (priority != 0xff), its line is asserted, and it is not
 * already in flight (SENT).
 */
static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}
/*
 * Input handler for an MSI (edge) source.  A masked interrupt
 * (priority 0xff) is latched as MASKED_PENDING; an unmasked one is
 * presented immediately.  val == 0 is ignored for edge interrupts.
 */
static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}
425 static void set_irq_lsi(ICSState *ics, int srcno, int val)
427 ICSIRQState *irq = ics->irqs + srcno;
429 trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
430 if (val) {
431 irq->status |= XICS_STATUS_ASSERTED;
432 } else {
433 irq->status &= ~XICS_STATUS_ASSERTED;
435 resend_lsi(ics, srcno);
438 static void ics_set_irq(void *opaque, int srcno, int val)
440 ICSState *ics = (ICSState *)opaque;
442 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
443 set_irq_lsi(ics, srcno, val);
444 } else {
445 set_irq_msi(ics, srcno, val);
/*
 * XIVE update for an MSI: if a masked-pending interrupt was latched
 * and the new priority unmasks it, deliver it now.
 */
static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}
462 static void write_xive_lsi(ICSState *ics, int srcno)
464 resend_lsi(ics, srcno);
/*
 * Update an interrupt's routing (server) and priorities, then let the
 * type-specific path (re)deliver it if appropriate.
 */
static void ics_write_xive(ICSState *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}
/* The ICP could not take interrupt @nr: remember it for a later resend. */
static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}
495 static void ics_resend(ICSState *ics)
497 int i;
499 for (i = 0; i < ics->nr_irqs; i++) {
500 /* FIXME: filter by server#? */
501 if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
502 resend_lsi(ics, i);
503 } else {
504 resend_msi(ics, i);
509 static void ics_eoi(ICSState *ics, int nr)
511 int srcno = nr - ics->offset;
512 ICSIRQState *irq = ics->irqs + srcno;
514 trace_xics_ics_eoi(nr);
516 if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
517 irq->status &= ~XICS_STATUS_SENT;
521 static void ics_reset(DeviceState *dev)
523 ICSState *ics = ICS(dev);
524 int i;
525 uint8_t flags[ics->nr_irqs];
527 for (i = 0; i < ics->nr_irqs; i++) {
528 flags[i] = ics->irqs[i].flags;
531 memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);
533 for (i = 0; i < ics->nr_irqs; i++) {
534 ics->irqs[i].priority = 0xff;
535 ics->irqs[i].saved_priority = 0xff;
536 ics->irqs[i].flags = flags[i];
540 static int ics_post_load(ICSState *ics, int version_id)
542 int i;
544 for (i = 0; i < ics->icp->nr_servers; i++) {
545 icp_resend(ics->icp, i);
548 return 0;
551 static void ics_dispatch_pre_save(void *opaque)
553 ICSState *ics = opaque;
554 ICSStateClass *info = ICS_GET_CLASS(ics);
556 if (info->pre_save) {
557 info->pre_save(ics);
561 static int ics_dispatch_post_load(void *opaque, int version_id)
563 ICSState *ics = opaque;
564 ICSStateClass *info = ICS_GET_CLASS(ics);
566 if (info->post_load) {
567 return info->post_load(ics, version_id);
570 return 0;
/* Migration state of a single interrupt source entry. */
static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};
/* Migration state of the whole interrupt source controller. */
static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_dispatch_pre_save,
    .post_load = ics_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check: both sides must agree on the irq count */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};
603 static void ics_initfn(Object *obj)
605 ICSState *ics = ICS(obj);
607 ics->offset = XICS_IRQ_BASE;
610 static void ics_realize(DeviceState *dev, Error **errp)
612 ICSState *ics = ICS(dev);
614 if (!ics->nr_irqs) {
615 error_setg(errp, "Number of interrupts needs to be greater 0");
616 return;
618 ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
619 ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
622 static void ics_class_init(ObjectClass *klass, void *data)
624 DeviceClass *dc = DEVICE_CLASS(klass);
625 ICSStateClass *isc = ICS_CLASS(klass);
627 dc->realize = ics_realize;
628 dc->vmsd = &vmstate_ics;
629 dc->reset = ics_reset;
630 isc->post_load = ics_post_load;
/* QOM type for the interrupt source controller. */
static const TypeInfo ics_info = {
    .name          = TYPE_ICS,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init    = ics_class_init,
    .class_size    = sizeof(ICSStateClass),
    .instance_init = ics_initfn,
};
643 * Exported functions
645 static int xics_find_source(XICSState *icp, int irq)
647 int sources = 1;
648 int src;
650 /* FIXME: implement multiple sources */
651 for (src = 0; src < sources; ++src) {
652 ICSState *ics = &icp->ics[src];
653 if (ics_valid_irq(ics, irq)) {
654 return src;
658 return -1;
661 qemu_irq xics_get_qirq(XICSState *icp, int irq)
663 int src = xics_find_source(icp, irq);
665 if (src >= 0) {
666 ICSState *ics = &icp->ics[src];
667 return ics->qirqs[irq - ics->offset];
670 return NULL;
673 static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
675 assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));
677 ics->irqs[srcno].flags |=
678 lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
681 void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
683 int src = xics_find_source(icp, irq);
684 ICSState *ics;
686 assert(src >= 0);
688 ics = &icp->ics[src];
689 ics_set_irq_type(ics, irq - ics->offset, lsi);
/* A source with no LSI/MSI type flag set has never been allocated. */
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
/*
 * Find @num consecutive free source numbers, the first aligned to a
 * multiple of @alignnum.  Returns the first source number of the
 * block, or -1 when no such block exists.
 */
static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            /* Not enough room left for a block of @num */
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            /* The whole candidate block was free */
            return first;
        }
    }

    return -1;
}
/*
 * Allocate a single IRQ from source @src, honouring @irq_hint when
 * non-zero (the hinted number must be free).  Returns the global IRQ
 * number, or -1 with @errp set on failure.
 */
int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp)
{
    ICSState *ics = &icp->ics[src];
    int irq;

    if (irq_hint) {
        assert(src == xics_find_source(icp, irq_hint));
        if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
            error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
            return -1;
        }
        irq = irq_hint;
    } else {
        irq = ics_find_free_block(ics, 1, 1);
        if (irq < 0) {
            error_setg(errp, "can't allocate IRQ: no IRQ left");
            return -1;
        }
        irq += ics->offset;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    trace_xics_alloc(src, irq);

    return irq;
}
/*
 * Allocate block of consecutive IRQs, and return the number of the
 * first IRQ in the block.  If align==true, aligns the first IRQ
 * number to num.
 */
int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align,
                     Error **errp)
{
    int i, first = -1;
    ICSState *ics = &icp->ics[src];

    assert(src == 0);
    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }
    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    if (first >= 0) {
        for (i = first; i < first + num; ++i) {
            ics_set_irq_type(ics, i, lsi);
        }
    }
    /* Convert the source-local number to a global IRQ number */
    first += ics->offset;

    trace_xics_alloc_block(src, first, num, lsi, align);

    return first;
}
/* Release @num sources starting at @srcno, warning on double frees. */
static void ics_free(ICSState *ics, int srcno, int num)
{
    int i;

    for (i = srcno; i < srcno + num; ++i) {
        if (ICS_IRQ_FREE(ics, i)) {
            /* Freeing an already-free source: warn but continue */
            trace_xics_ics_free_warn(ics - ics->icp->ics, i + ics->offset);
        }
        memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
    }
}
797 void xics_free(XICSState *icp, int irq, int num)
799 int src = xics_find_source(icp, irq);
801 if (src >= 0) {
802 ICSState *ics = &icp->ics[src];
804 /* FIXME: implement multiple sources */
805 assert(src == 0);
807 trace_xics_ics_free(ics - icp->ics, irq, num);
808 ics_free(ics, irq - ics->offset, num);
813 * Guest interfaces
816 static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
817 target_ulong opcode, target_ulong *args)
819 CPUState *cs = CPU(cpu);
820 target_ulong cppr = args[0];
822 icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
823 return H_SUCCESS;
826 static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
827 target_ulong opcode, target_ulong *args)
829 target_ulong server = get_cpu_index_by_dt_id(args[0]);
830 target_ulong mfrr = args[1];
832 if (server >= spapr->icp->nr_servers) {
833 return H_PARAMETER;
836 icp_set_mfrr(spapr->icp, server, mfrr);
837 return H_SUCCESS;
840 static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
841 target_ulong opcode, target_ulong *args)
843 CPUState *cs = CPU(cpu);
844 uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);
846 args[0] = xirr;
847 return H_SUCCESS;
850 static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
851 target_ulong opcode, target_ulong *args)
853 CPUState *cs = CPU(cpu);
854 ICPState *ss = &spapr->icp->ss[cs->cpu_index];
855 uint32_t xirr = icp_accept(ss);
857 args[0] = xirr;
858 args[1] = cpu_get_host_ticks();
859 return H_SUCCESS;
862 static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
863 target_ulong opcode, target_ulong *args)
865 CPUState *cs = CPU(cpu);
866 target_ulong xirr = args[0];
868 icp_eoi(spapr->icp, cs->cpu_index, xirr);
869 return H_SUCCESS;
872 static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
873 target_ulong opcode, target_ulong *args)
875 CPUState *cs = CPU(cpu);
876 ICPState *ss = &spapr->icp->ss[cs->cpu_index];
878 args[0] = ss->xirr;
879 args[1] = ss->mfrr;
881 return H_SUCCESS;
/*
 * RTAS ibm,set-xive: set server and priority for an interrupt.
 * in:  irq number, server (device-tree CPU id), priority
 * out: status
 */
static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);
    server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    /* saved_priority mirrors priority here, so a later ibm,int-on
     * restores the value set by this call. */
    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
912 static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
913 uint32_t token,
914 uint32_t nargs, target_ulong args,
915 uint32_t nret, target_ulong rets)
917 ICSState *ics = spapr->icp->ics;
918 uint32_t nr;
920 if ((nargs != 1) || (nret != 3)) {
921 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
922 return;
925 nr = rtas_ld(args, 0);
927 if (!ics_valid_irq(ics, nr)) {
928 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
929 return;
932 rtas_st(rets, 0, RTAS_OUT_SUCCESS);
933 rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
934 rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
/*
 * RTAS ibm,int-off: mask an interrupt, remembering its current
 * priority in saved_priority so ibm,int-on can restore it later.
 */
static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    /* Priority 0xff == masked; the old priority becomes saved_priority. */
    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
                   ics->irqs[nr - ics->offset].priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
/*
 * RTAS ibm,int-on: unmask an interrupt by restoring the priority
 * stashed in saved_priority by an earlier ibm,int-off.
 */
static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                        uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
                   ics->irqs[nr - ics->offset].saved_priority,
                   ics->irqs[nr - ics->offset].saved_priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
991 * XICS
994 static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp)
996 icp->nr_irqs = icp->ics->nr_irqs = nr_irqs;
999 static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers,
1000 Error **errp)
1002 int i;
1004 icp->nr_servers = nr_servers;
1006 icp->ss = g_malloc0(icp->nr_servers*sizeof(ICPState));
1007 for (i = 0; i < icp->nr_servers; i++) {
1008 char buffer[32];
1009 object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP);
1010 snprintf(buffer, sizeof(buffer), "icp[%d]", i);
1011 object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
1012 errp);
/*
 * Realize the emulated XICS: register the RTAS calls and hypercalls
 * forming the guest interface, then realize the child ICS and every
 * per-server ICP.
 */
static void xics_realize(DeviceState *dev, Error **errp)
{
    XICSState *icp = XICS(dev);
    Error *error = NULL;
    int i;

    if (!icp->nr_servers) {
        error_setg(errp, "Number of servers needs to be greater 0");
        return;
    }

    /* Registration of global state belongs into realize */
    spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
    spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
    spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
    spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_XIRR_X, h_xirr_x);
    spapr_register_hypercall(H_EOI, h_eoi);
    spapr_register_hypercall(H_IPOLL, h_ipoll);

    object_property_set_bool(OBJECT(icp->ics), true, "realized", &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    for (i = 0; i < icp->nr_servers; i++) {
        object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error);
        if (error) {
            error_propagate(errp, error);
            return;
        }
    }
}
1055 static void xics_initfn(Object *obj)
1057 XICSState *xics = XICS(obj);
1059 xics->ics = ICS(object_new(TYPE_ICS));
1060 object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
1061 xics->ics->icp = xics;
1064 static void xics_class_init(ObjectClass *oc, void *data)
1066 DeviceClass *dc = DEVICE_CLASS(oc);
1067 XICSStateClass *xsc = XICS_CLASS(oc);
1069 dc->realize = xics_realize;
1070 xsc->set_nr_irqs = xics_set_nr_irqs;
1071 xsc->set_nr_servers = xics_set_nr_servers;
/* QOM type for the fully emulated (non-KVM) XICS. */
static const TypeInfo xics_info = {
    .name          = TYPE_XICS,
    .parent        = TYPE_XICS_COMMON,
    .instance_size = sizeof(XICSState),
    .class_size    = sizeof(XICSStateClass),
    .class_init    = xics_class_init,
    .instance_init = xics_initfn,
};
/* Register all XICS-related QOM types with the type system. */
static void xics_register_types(void)
{
    type_register_static(&xics_common_info);
    type_register_static(&xics_info);
    type_register_static(&ics_info);
    type_register_static(&icp_info);
}

type_init(xics_register_types)