/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "hw/hw.h"
#include "trace.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"

/*
 * ICP: Presentation layer
 */

#define XISR_MASK  0x00ffffff
#define CPPR_MASK  0xff000000

#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
#define CPPR(ss)   (((ss)->xirr) >> 24)

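/*
 * The XIRR presented to a server packs the 8-bit CPPR (current processor
 * priority) in the top byte and the 24-bit XISR (source number of the
 * pending interrupt) in the low bytes.  Numerically lower priorities are
 * more favored; 0xff is the least favored level and doubles as "nothing
 * pending" / "masked".
 */
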
static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

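/*
 * Present the inter-processor interrupt set up via the MFRR, unless an
 * equally or more favored interrupt is already pending.  A displaced,
 * less favored external interrupt is handed back (rejected) to the
 * source layer.  Callers compare the MFRR against the CPPR first.
 */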
static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}

static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

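/* H_IPI backend: store the new MFRR and present the IPI if it is now more
 * favored than the server's current processor priority. */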
static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
{
    ICPState *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
}

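/*
 * H_XIRR backend: the server accepts the pending interrupt.  The old XIRR
 * is returned to the guest, the pending priority becomes the new CPPR and
 * the XISR field is cleared until the next delivery.
 */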
static uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}

static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

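/*
 * Deliver interrupt nr at the given priority to a server.  If it is less
 * favored than the CPPR or than an interrupt already pending there, it is
 * rejected back to the source layer; otherwise it becomes the pending
 * interrupt (any displaced, less favored interrupt is rejected) and the
 * output line is raised.
 */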
static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
};

/*
 * ICS: Source layer
 */

static int ics_valid_irq(ICSState *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}

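/*
 * Re-presentation helpers used after a rejection or a CPPR change: MSI
 * (edge) sources are resent only if they were previously rejected, LSI
 * (level) sources whenever they are still asserted and not already sent.
 */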
static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->islsi[srcno]) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}

static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

static void ics_write_xive(ICSState *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->islsi[srcno]) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}

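/*
 * The presentation layer hands an interrupt back when it cannot hold it
 * (less favored than the CPPR, or displaced by a more favored one); the
 * rejection is recorded here so a later resend can re-present it.
 */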
static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}

static void ics_resend(ICSState *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->islsi[i]) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, int nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->islsi[srcno]) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    int i;

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);
    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
    }
}

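/*
 * After an incoming migration, run the presentation logic again for every
 * server so that source state restored above (rejected or still asserted
 * interrupts) is re-presented.
 */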
static int ics_post_load(void *opaque, int version_id)
{
    int i;
    ICSState *ics = opaque;

    for (i = 0; i < ics->icp->nr_servers; i++) {
        icp_resend(ics->icp, i);
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = ics_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static int ics_realize(DeviceState *dev)
{
    ICSState *ics = ICS(dev);

    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
    ics->islsi = g_malloc0(ics->nr_irqs * sizeof(bool));
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);

    return 0;
}

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->init = ics_realize;
    dc->vmsd = &vmstate_ics;
    dc->reset = ics_reset;
}

static TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init = ics_class_init,
};

/*
 * Exported functions
 */

qemu_irq xics_get_qirq(XICSState *icp, int irq)
{
    if (!ics_valid_irq(icp->ics, irq)) {
        return NULL;
    }

    return icp->ics->qirqs[irq - icp->ics->offset];
}

void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
{
    assert(ics_valid_irq(icp->ics, irq));

    icp->ics->islsi[irq - icp->ics->offset] = lsi;
}

/*
 * Guest interfaces
 */

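/*
 * The guest drives XICS through the H_CPPR/H_IPI/H_XIRR/H_EOI hypercalls
 * (presentation layer) and the ibm,{set,get}-xive and ibm,int-{off,on}
 * RTAS calls (source layer); both sets are registered in xics_class_init.
 */
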
static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
    return H_SUCCESS;
}

static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = args[0];
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

static target_ulong h_xirr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

static target_ulong h_eoi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, cs->cpu_index, xirr);
    return H_SUCCESS;
}

static void rtas_set_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, -3); /* RTAS "Parameter error" status */
        return;
    }

    nr = rtas_ld(args, 0);
    server = rtas_ld(args, 1);
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, -3); /* Parameter error */
        return;
    }

    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, 0); /* Success */
}

static void rtas_get_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    rtas_st(rets, 0, 0); /* Success */
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
                   ics->irqs[nr - ics->offset].priority);

    rtas_st(rets, 0, 0); /* Success */
}

static void rtas_int_on(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                        uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
                   ics->irqs[nr - ics->offset].saved_priority,
                   ics->irqs[nr - ics->offset].saved_priority);

    rtas_st(rets, 0, 0); /* Success */
}

/*
 * XICS
 */

static void xics_reset(DeviceState *d)
{
    XICSState *icp = XICS(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}

void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];

    assert(cs->cpu_index < icp->nr_servers);

    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        fprintf(stderr, "XICS interrupt controller does not support this CPU "
                "bus model\n");
        abort();
    }
}

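/*
 * Realize propagates nr_irqs and the XICS_IRQ_BASE offset to the single
 * ICS child and instantiates one ICP (presentation controller) child per
 * server.
 */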
static void xics_realize(DeviceState *dev, Error **errp)
{
    XICSState *icp = XICS(dev);
    ICSState *ics = icp->ics;
    int i;

    ics->nr_irqs = icp->nr_irqs;
    ics->offset = XICS_IRQ_BASE;
    ics->icp = icp;
    qdev_init_nofail(DEVICE(ics));

    icp->ss = g_malloc0(icp->nr_servers * sizeof(ICPState));
    for (i = 0; i < icp->nr_servers; i++) {
        char buffer[32];

        object_initialize(&icp->ss[i], TYPE_ICP);
        snprintf(buffer, sizeof(buffer), "icp[%d]", i);
        object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
                                  NULL);
        qdev_init_nofail(DEVICE(&icp->ss[i]));
    }
}

static void xics_initfn(Object *obj)
{
    XICSState *xics = XICS(obj);

    xics->ics = ICS(object_new(TYPE_ICS));
    object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
}

static Property xics_properties[] = {
    DEFINE_PROP_UINT32("nr_servers", XICSState, nr_servers, -1),
    DEFINE_PROP_UINT32("nr_irqs", XICSState, nr_irqs, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void xics_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = xics_realize;
    dc->props = xics_properties;
    dc->reset = xics_reset;

    spapr_rtas_register("ibm,set-xive", rtas_set_xive);
    spapr_rtas_register("ibm,get-xive", rtas_get_xive);
    spapr_rtas_register("ibm,int-off", rtas_int_off);
    spapr_rtas_register("ibm,int-on", rtas_int_on);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_EOI, h_eoi);
}

static const TypeInfo xics_info = {
    .name = TYPE_XICS,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_init = xics_class_init,
    .instance_init = xics_initfn,
};

static void xics_register_types(void)
{
    type_register_static(&xics_info);
    type_register_static(&ics_info);
    type_register_static(&icp_info);
}

type_init(xics_register_types)