/*
 * OpenPIC emulation
 *
 * Copyright (c) 2004 Jocelyn Mayer
 *               2011 Alexander Graf
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/*
 * Based on OpenPic implementations:
 * - Motorola MPC8245 & MPC8540 user manuals.
 * - Motorola Harrier programmer manual
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "hw/ppc/openpic.h"
#include "hw/ppc/ppc_e500.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/pci/msi.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "qapi/qmp/qerror.h"
#include "qemu/module.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"

/* #define DEBUG_OPENPIC */

#ifdef DEBUG_OPENPIC
static const int debug_openpic = 1;
#else
static const int debug_openpic = 0;
#endif

static int get_current_cpu(void);
#define DPRINTF(fmt, ...) do { \
        if (debug_openpic) { \
            info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \
        } \
    } while (0)

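/*
 * Note that the debug check above is a run-time "if (debug_openpic)" rather
 * than conditional compilation: the format strings and arguments stay
 * type-checked in non-debug builds, and the compiler removes the dead branch
 * when debug_openpic is 0.
 */
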
/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT     (1 << 0)
#define OPENPIC_FLAG_ILR          (2 << 0)

/* OpenPIC address map */
#define OPENPIC_GLB_REG_START        0x0
#define OPENPIC_GLB_REG_SIZE         0x10F0
#define OPENPIC_TMR_REG_START        0x10F0
#define OPENPIC_TMR_REG_SIZE         0x220
#define OPENPIC_MSI_REG_START        0x1600
#define OPENPIC_MSI_REG_SIZE         0x200
#define OPENPIC_SUMMARY_REG_START    0x3800
#define OPENPIC_SUMMARY_REG_SIZE     0x800
#define OPENPIC_SRC_REG_START        0x10000
#define OPENPIC_SRC_REG_SIZE         (OPENPIC_MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START        0x20000
#define OPENPIC_CPU_REG_SIZE         0x100 + ((MAX_CPU - 1) * 0x1000)

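/*
 * Note on the layout above: each interrupt source occupies a 0x20-byte window
 * in the source register block, and each CPU has its own 0x1000-byte window
 * in the per-CPU block starting at 0x20000 (the CPU index is recovered from
 * bits 16:12 of the access address in openpic_cpu_read/openpic_cpu_write).
 */
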
static FslMpicInfo fsl_mpic_20 = {
    .max_ext = 12,
};

static FslMpicInfo fsl_mpic_42 = {
    .max_ext = 12,
};

#define FRR_NIRQ_SHIFT    16
#define FRR_NCPU_SHIFT     8
#define FRR_VID_SHIFT      0

#define VID_REVISION_1_2   2
#define VID_REVISION_1_3   3

#define VIR_GENERIC      0x00000000 /* Generic Vendor ID */
#define VIR_MPIC2A       0x00004614 /* IBM MPIC-2A */

#define GCR_RESET        0x80000000
#define GCR_MODE_PASS    0x00000000
#define GCR_MODE_MIXED   0x20000000
#define GCR_MODE_PROXY   0x60000000

#define TBCR_CI          0x80000000 /* count inhibit */
#define TCCR_TOG         0x80000000 /* toggles when decremented to zero */

#define IDR_EP_SHIFT     31
#define IDR_EP_MASK      (1U << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT    30
#define IDR_CI1_SHIFT    29
#define IDR_P1_SHIFT     1
#define IDR_P0_SHIFT     0

#define ILR_INTTGT_MASK  0x000000ff
#define ILR_INTTGT_INT   0x00
#define ILR_INTTGT_CINT  0x01 /* critical */
#define ILR_INTTGT_MCP   0x02 /* machine check */

/*
 * The currently supported INTTGT values happen to be the same as QEMU's
 * openpic output codes, but don't depend on this.  The output codes
 * could change (unlikely, but...) or support could be added for
 * more INTTGT values.
 */
static const int inttgt_output[][2] = {
    { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT },
    { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT },
    { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK },
};

static int inttgt_to_output(int inttgt)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][0] == inttgt) {
            return inttgt_output[i][1];
        }
    }

    error_report("%s: unsupported inttgt %d", __func__, inttgt);
    return OPENPIC_OUTPUT_INT;
}

static int output_to_inttgt(int output)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][1] == output) {
            return inttgt_output[i][0];
        }
    }

    abort();
}

#define MSIIR_OFFSET       0x140
#define MSIIR_SRS_SHIFT    29
#define MSIIR_SRS_MASK     (0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT    24
#define MSIIR_IBS_MASK     (0x1f << MSIIR_IBS_SHIFT)

static int get_current_cpu(void)
{
    if (!current_cpu) {
        return -1;
    }

    return current_cpu->cpu_index;
}

static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx);
static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx);
static void openpic_reset(DeviceState *d);

/*
 * Convert between openpic clock ticks and nanosecs.  In the hardware the clock
 * frequency is driven by board inputs to the PIC which the PIC would then
 * divide by 4 or 8.  For now hard code to 25 MHz.
 */
#define OPENPIC_TIMER_FREQ_MHZ  25
#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ)

static inline uint64_t ns_to_ticks(uint64_t ns)
{
    return ns / OPENPIC_TIMER_NS_PER_TICK;
}

static inline uint64_t ticks_to_ns(uint64_t ticks)
{
    return ticks * OPENPIC_TIMER_NS_PER_TICK;
}

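/*
 * At the hard-coded 25 MHz this works out to 1000 / 25 = 40 ns per tick,
 * so e.g. a base count of 25,000,000 expires after one second of virtual
 * time.
 */
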
static inline void IRQ_setbit(IRQQueue *q, int n_IRQ)
{
    set_bit(n_IRQ, q->queue);
}

static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ)
{
    clear_bit(n_IRQ, q->queue);
}

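/*
 * Scan the queue bitmap and record the highest-priority pending IRQ in
 * q->next (the lowest-numbered IRQ wins a priority tie, since the comparison
 * below is strict).  If the queue is empty, q->next and q->priority are -1.
 */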
static void IRQ_check(OpenPICState *opp, IRQQueue *q)
{
    int irq = -1;
    int next = -1;
    int priority = -1;

    for (;;) {
        irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
        if (irq == opp->max_irq) {
            break;
        }

        DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d",
                irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

        if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
            next = irq;
            priority = IVPR_PRIORITY(opp->src[irq].ivpr);
        }
    }

    q->next = next;
    q->priority = priority;
}

static int IRQ_get_next(OpenPICState *opp, IRQQueue *q)
{
    /* XXX: optimize */
    IRQ_check(opp, q);

    return q->next;
}

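/*
 * Propagate one source's state change to a single destination CPU: non-INT
 * outputs (critical interrupt, machine check, ...) are raised or lowered
 * directly with reference counting, while normal INT delivery goes through
 * the per-CPU raised queue and is gated by CTPR and by any IRQ currently
 * being serviced.
 */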
static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
                           bool active, bool was_active)
{
    IRQDest *dst;
    IRQSource *src;
    int priority;

    dst = &opp->dst[n_CPU];
    src = &opp->src[n_IRQ];

    DPRINTF("%s: IRQ %d active %d was %d",
            __func__, n_IRQ, active, was_active);

    if (src->output != OPENPIC_OUTPUT_INT) {
        DPRINTF("%s: output %d irq %d active %d was %d count %d",
                __func__, src->output, n_IRQ, active, was_active,
                dst->outputs_active[src->output]);

        /*
         * On Freescale MPIC, critical interrupts ignore priority,
         * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
         * masking.
         */
        if (active) {
            if (!was_active && dst->outputs_active[src->output]++ == 0) {
                DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_raise(dst->irqs[src->output]);
            }
        } else {
            if (was_active && --dst->outputs_active[src->output] == 0) {
                DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_lower(dst->irqs[src->output]);
            }
        }

        return;
    }

    priority = IVPR_PRIORITY(src->ivpr);

    /*
     * Even if the interrupt doesn't have enough priority,
     * it is still raised, in case ctpr is lowered later.
     */
    if (active) {
        IRQ_setbit(&dst->raised, n_IRQ);
    } else {
        IRQ_resetbit(&dst->raised, n_IRQ);
    }

    IRQ_check(opp, &dst->raised);

    if (active && priority <= dst->ctpr) {
        DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d",
                __func__, n_IRQ, priority, dst->ctpr, n_CPU);
        active = 0;
    }

    if (active) {
        if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
                priority <= dst->servicing.priority) {
            DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d",
                    __func__, n_IRQ, dst->servicing.next, n_CPU);
        } else {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d",
                    __func__, n_CPU, n_IRQ, dst->raised.next);
            qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    } else {
        IRQ_get_next(opp, &dst->servicing);
        if (dst->raised.priority > dst->ctpr &&
                dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d",
                    __func__, n_IRQ, dst->raised.next, dst->raised.priority,
                    dst->ctpr, dst->servicing.priority, n_CPU);
            /* IRQ line stays asserted */
        } else {
            DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d",
                    __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU);
            qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    }
}

326 static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
328 IRQSource *src;
329 bool active, was_active;
330 int i;
332 src = &opp->src[n_IRQ];
333 active = src->pending;
335 if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
336 /* Interrupt source is disabled */
337 DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ);
338 active = false;
341 was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);
344 * We don't have a similar check for already-active because
345 * ctpr may have changed and we need to withdraw the interrupt.
347 if (!active && !was_active) {
348 DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ);
349 return;
352 if (active) {
353 src->ivpr |= IVPR_ACTIVITY_MASK;
354 } else {
355 src->ivpr &= ~IVPR_ACTIVITY_MASK;
358 if (src->destmask == 0) {
359 /* No target */
360 DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ);
361 return;
364 if (src->destmask == (1 << src->last_cpu)) {
365 /* Only one CPU is allowed to receive this IRQ */
366 IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
367 } else if (!(src->ivpr & IVPR_MODE_MASK)) {
368 /* Directed delivery mode */
369 for (i = 0; i < opp->nb_cpus; i++) {
370 if (src->destmask & (1 << i)) {
371 IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
374 } else {
375 /* Distributed delivery mode */
376 for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
377 if (i == opp->nb_cpus) {
378 i = 0;
380 if (src->destmask & (1 << i)) {
381 IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
382 src->last_cpu = i;
383 break;
static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
    OpenPICState *opp = opaque;
    IRQSource *src;

    if (n_IRQ >= OPENPIC_MAX_IRQ) {
        error_report("%s: IRQ %d out of range", __func__, n_IRQ);
        abort();
    }

    src = &opp->src[n_IRQ];
    DPRINTF("openpic: set irq %d = %d ivpr=0x%08x",
            n_IRQ, level, src->ivpr);
    if (src->level) {
        /* level-sensitive irq */
        src->pending = level;
        openpic_update_irq(opp, n_IRQ);
    } else {
        /* edge-sensitive irq */
        if (level) {
            src->pending = 1;
            openpic_update_irq(opp, n_IRQ);
        }

        if (src->output != OPENPIC_OUTPUT_INT) {
            /*
             * Edge-triggered interrupts shouldn't be used
             * with non-INT delivery, but just in case,
             * try to make it do something sane rather than
             * cause an interrupt storm.  This is close to
             * what you'd probably see happen in real hardware.
             */
            src->pending = 0;
            openpic_update_irq(opp, n_IRQ);
        }
    }
}

static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].idr;
}

static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ)
{
    if (opp->flags & OPENPIC_FLAG_ILR) {
        return output_to_inttgt(opp->src[n_IRQ].output);
    }

    return 0xffffffff;
}

static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].ivpr;
}

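/*
 * On FSL MPICs the low nb_cpus bits of IDR select normal (INT) destinations.
 * When OPENPIC_FLAG_IDR_CRIT is set, an equally wide field just below the
 * external-pin bit selects critical-interrupt destinations instead: with
 * nb_cpus == 2, bit 30 targets CPU 0 and bit 29 targets CPU 1.
 */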
static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    IRQSource *src = &opp->src[n_IRQ];
    uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
    uint32_t crit_mask = 0;
    uint32_t mask = normal_mask;
    int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
    int i;

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        crit_mask = mask << crit_shift;
        mask |= crit_mask | IDR_EP;
    }

    src->idr = val & mask;
    DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr);

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        if (src->idr & crit_mask) {
            if (src->idr & normal_mask) {
                DPRINTF("%s: IRQ configured for multiple output types, using "
                        "critical", __func__);
            }

            src->output = OPENPIC_OUTPUT_CINT;
            src->nomask = true;
            src->destmask = 0;

            for (i = 0; i < opp->nb_cpus; i++) {
                int n_ci = IDR_CI0_SHIFT - i;

                if (src->idr & (1UL << n_ci)) {
                    src->destmask |= 1UL << i;
                }
            }
        } else {
            src->output = OPENPIC_OUTPUT_INT;
            src->nomask = false;
            src->destmask = src->idr & normal_mask;
        }
    } else {
        src->destmask = src->idr;
    }
}

static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    if (opp->flags & OPENPIC_FLAG_ILR) {
        IRQSource *src = &opp->src[n_IRQ];

        src->output = inttgt_to_output(val & ILR_INTTGT_MASK);
        DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, val,
                src->output);

        /* TODO: on MPIC v4.0 only, set nomask for non-INT */
    }
}

static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    uint32_t mask;

    /*
     * NOTE when implementing newer FSL MPIC models: starting with v4.0,
     * the polarity bit is read-only on internal interrupts.
     */
    mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
           IVPR_POLARITY_MASK | opp->vector_mask;

    /* ACTIVITY bit is read-only */
    opp->src[n_IRQ].ivpr =
        (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

    /*
     * For FSL internal interrupts, the sense bit is reserved and zero,
     * and the interrupt is always level-triggered.  Timers and IPIs
     * have no sense or polarity bits, and are edge-triggered.
     */
    switch (opp->src[n_IRQ].type) {
    case IRQ_TYPE_NORMAL:
        opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
        break;

    case IRQ_TYPE_FSLINT:
        opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
        break;

    case IRQ_TYPE_FSLSPECIAL:
        opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
        break;
    }

    openpic_update_irq(opp, n_IRQ);
    DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val,
            opp->src[n_IRQ].ivpr);
}

static void openpic_gcr_write(OpenPICState *opp, uint64_t val)
{
    bool mpic_proxy = false;

    if (val & GCR_RESET) {
        openpic_reset(DEVICE(opp));
        return;
    }

    opp->gcr &= ~opp->mpic_mode_mask;
    opp->gcr |= val & opp->mpic_mode_mask;

    /* Set external proxy mode */
    if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY) {
        mpic_proxy = true;
    }

    ppce500_set_mpic_proxy(mpic_proxy);
}

static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    IRQDest *dst;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);
    if (addr & 0xF) {
        return;
    }
    switch (addr) {
    case 0x00: /* Block Revision Register1 (BRR1) is Readonly */
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        openpic_cpu_write_internal(opp, addr, val, get_current_cpu());
        break;
    case 0x1000: /* FRR */
        break;
    case 0x1020: /* GCR */
        openpic_gcr_write(opp, val);
        break;
    case 0x1080: /* VIR */
        break;
    case 0x1090: /* PIR */
        for (idx = 0; idx < opp->nb_cpus; idx++) {
            if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) {
                DPRINTF("Raise OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
            } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) {
                DPRINTF("Lower OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
            }
        }
        opp->pir = val;
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        idx = (addr - 0x10A0) >> 4;
        write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
        break;
    case 0x10E0: /* SPVE */
        opp->spve = val & opp->vector_mask;
        break;
    default:
        break;
    }
}

static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    retval = 0xFFFFFFFF;
    if (addr & 0xF) {
        return retval;
    }
    switch (addr) {
    case 0x1000: /* FRR */
        retval = opp->frr;
        break;
    case 0x1020: /* GCR */
        retval = opp->gcr;
        break;
    case 0x1080: /* VIR */
        retval = opp->vir;
        break;
    case 0x1090: /* PIR */
        retval = 0x00000000;
        break;
    case 0x00: /* Block Revision Register1 (BRR1) */
        retval = opp->brr1;
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        retval = openpic_cpu_read_internal(opp, addr, get_current_cpu());
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        {
            int idx;
            idx = (addr - 0x10A0) >> 4;
            retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
        }
        break;
    case 0x10E0: /* SPVE */
        retval = opp->spve;
        break;
    default:
        break;
    }
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}

static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled);

static void qemu_timer_cb(void *opaque)
{
    OpenPICTimer *tmr = opaque;
    OpenPICState *opp = tmr->opp;
    uint32_t n_IRQ = tmr->n_IRQ;
    uint32_t val = tmr->tbcr & ~TBCR_CI;
    uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG); /* invert toggle. */

    DPRINTF("%s n_IRQ=%d", __func__, n_IRQ);
    /* Reload current count from base count and setup timer. */
    tmr->tccr = val | tog;
    openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
    /* Raise the interrupt. */
    opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ);
    openpic_set_irq(opp, n_IRQ, 1);
    openpic_set_irq(opp, n_IRQ, 0);
}

/*
 * If enabled is true, this arranges for an interrupt to be raised val clocks
 * into the future; if enabled is false, it cancels the timer.
 */
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled)
{
    uint64_t ns = ticks_to_ns(val & ~TCCR_TOG);
    /*
     * A count of zero causes a timer to be set to expire immediately.  This
     * effectively stops the simulation since the timer is constantly expiring
     * which prevents guest code execution, so we don't honor that
     * configuration.  On real hardware, this situation would generate an
     * interrupt on every clock cycle if the interrupt was unmasked.
     */
    if ((ns == 0) || !enabled) {
        tmr->qemu_timer_active = false;
        tmr->tccr = tmr->tccr & TCCR_TOG;
        timer_del(tmr->qemu_timer); /* set timer to never expire. */
    } else {
        tmr->qemu_timer_active = true;
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        tmr->origin_time = now;
        timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */
    }
}

/*
 * Returns the current tccr value, i.e., timer value (in clocks) with
 * appropriate TOG.
 */
static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr)
{
    uint64_t retval;
    if (!tmr->qemu_timer_active) {
        retval = tmr->tccr;
    } else {
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        uint64_t used = now - tmr->origin_time;  /* nsecs */
        uint32_t used_ticks = (uint32_t)ns_to_ticks(used);
        uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks;
        retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG));
    }
    return retval;
}

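/*
 * Timer register block layout (offsets relative to OPENPIC_TMR_REG_START):
 * TFRR lives at offset 0x0; after that each of the four timers occupies a
 * 0x40-byte window holding TCCR (+0x00), TBCR (+0x10), TVPR (+0x20) and
 * TDR (+0x30), which is what the "addr -= 0x10" adjustment below accounts
 * for.
 */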
static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, (addr + 0x10f0), val);
    if (addr & 0xF) {
        return;
    }

    if (addr == 0) {
        /* TFRR */
        opp->tfrr = val;
        return;
    }
    addr -= 0x10;  /* correct for TFRR */
    idx = (addr >> 6) & 0x3;

    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        break;
    case 0x10: /* TBCR */
        /* Did the enable status change? */
        if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) {
            /* Did "Count Inhibit" transition from 1 to 0? */
            if ((val & TBCR_CI) == 0) {
                opp->timers[idx].tccr = val & ~TCCR_TOG;
            }
            openpic_tmr_set_tmr(&opp->timers[idx],
                                (val & ~TBCR_CI),
                                /*enabled=*/((val & TBCR_CI) == 0));
        }
        opp->timers[idx].tbcr = val;
        break;
    case 0x20: /* TVPR */
        write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
        break;
    case 0x30: /* TDR */
        write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
        break;
    }
}

static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval = -1;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0);
    if (addr & 0xF) {
        goto out;
    }
    if (addr == 0) {
        /* TFRR */
        retval = opp->tfrr;
        goto out;
    }
    addr -= 0x10;  /* correct for TFRR */
    idx = (addr >> 6) & 0x3;
    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        retval = openpic_tmr_get_timer(&opp->timers[idx]);
        break;
    case 0x10: /* TBCR */
        retval = opp->timers[idx].tbcr;
        break;
    case 0x20: /* TVPR */
        retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
        break;
    case 0x30: /* TDR */
        retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
        break;
    }

out:
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}

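/*
 * Per-source registers: each interrupt source has a 0x20-byte window in this
 * region, with IVPR at offset 0x00, IDR at 0x10 and ILR at 0x18 (the ILR is
 * only honoured when OPENPIC_FLAG_ILR is set); the source number is simply
 * the window index.
 */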
static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);

    addr = addr & 0xffff;
    idx = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        write_IRQreg_ivpr(opp, idx, val);
        break;
    case 0x10:
        write_IRQreg_idr(opp, idx, val);
        break;
    case 0x18:
        write_IRQreg_ilr(opp, idx, val);
        break;
    }
}

static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    retval = 0xFFFFFFFF;

    addr = addr & 0xffff;
    idx = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        retval = read_IRQreg_ivpr(opp, idx);
        break;
    case 0x10:
        retval = read_IRQreg_idr(opp, idx);
        break;
    case 0x18:
        retval = read_IRQreg_ilr(opp, idx);
        break;
    }

    DPRINTF("%s: => 0x%08x", __func__, retval);
    return retval;
}

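/*
 * A write to MSIIR encodes the shared MSI register in bits 31:29 (SRS) and
 * the bit to set within it in bits 28:24 (IBS).  For example, a write of
 * 0x23000000 sets bit 3 of MSIR1 and raises MSI interrupt irq_msi + 1.
 */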
static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned size)
{
    OpenPICState *opp = opaque;
    int idx = opp->irq_msi;
    int srs, ibs;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);
    if (addr & 0xF) {
        return;
    }

    switch (addr) {
    case MSIIR_OFFSET:
        srs = val >> MSIIR_SRS_SHIFT;
        idx += srs;
        ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
        opp->msi[srs].msir |= 1 << ibs;
        openpic_set_irq(opp, idx, 1);
        break;
    default:
        /* most registers are read-only, thus ignored */
        break;
    }
}

static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    OpenPICState *opp = opaque;
    uint64_t r = 0;
    int i, srs;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    if (addr & 0xF) {
        return -1;
    }

    srs = addr >> 4;

    switch (addr) {
    case 0x00:
    case 0x10:
    case 0x20:
    case 0x30:
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70: /* MSIRs */
        r = opp->msi[srs].msir;
        /* Clear on read */
        opp->msi[srs].msir = 0;
        openpic_set_irq(opp, opp->irq_msi + srs, 0);
        break;
    case 0x120: /* MSISR */
        for (i = 0; i < MAX_MSI; i++) {
            r |= (opp->msi[i].msir ? 1 : 0) << i;
        }
        break;
    }

    return r;
}

static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t r = 0;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);

    /* TODO: EISR/EIMR */

    return r;
}

static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);

    /* TODO: EISR/EIMR */
}

static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx)
{
    OpenPICState *opp = opaque;
    IRQSource *src;
    IRQDest *dst;
    int s_IRQ, n_IRQ;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx,
            addr, val);

    if (idx < 0 || idx >= opp->nb_cpus) {
        return;
    }

    if (addr & 0xF) {
        return;
    }
    dst = &opp->dst[idx];
    addr &= 0xFF0;
    switch (addr) {
    case 0x40: /* IPIDR */
    case 0x50:
    case 0x60:
    case 0x70:
        idx = (addr - 0x40) >> 4;
        /* IDE is used as a mask of which CPUs the IPI still needs delivery to. */
        opp->src[opp->irq_ipi0 + idx].destmask |= val;
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
        break;
    case 0x80: /* CTPR */
        dst->ctpr = val & 0x0000000F;

        DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d",
                __func__, idx, dst->ctpr, dst->raised.priority,
                dst->servicing.priority);

        if (dst->raised.priority <= dst->ctpr) {
            DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr",
                    __func__, idx);
            qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
        } else if (dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d",
                    __func__, idx, dst->raised.next);
            qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]);
        }

        break;
    case 0x90: /* WHOAMI */
        /* Read-only register */
        break;
    case 0xA0: /* IACK */
        /* Read-only register */
        break;
    case 0xB0: /* EOI */
        DPRINTF("EOI");
        s_IRQ = IRQ_get_next(opp, &dst->servicing);

        if (s_IRQ < 0) {
            DPRINTF("%s: EOI with no interrupt in service", __func__);
            break;
        }

        IRQ_resetbit(&dst->servicing, s_IRQ);
        /* Set up next servicing IRQ */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);
        /* Check queued interrupts. */
        n_IRQ = IRQ_get_next(opp, &dst->raised);
        src = &opp->src[n_IRQ];
        if (n_IRQ != -1 &&
            (s_IRQ == -1 ||
             IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
            DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
                    idx, n_IRQ);
            qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
        }

        break;
    default:
        break;
    }
}

static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12);
}

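/*
 * Interrupt acknowledge: return the vector of the highest-priority raised
 * IRQ that beats the CPU's current task priority and move it to the
 * servicing queue; if nothing qualifies, return the spurious vector (SPVE).
 * Edge-triggered sources are cleared here, and IPI/timer sources are
 * re-triggered for any other CPUs still present in their destination mask.
 */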
static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
{
    IRQSource *src;
    int retval, irq;

    DPRINTF("Lower OpenPIC INT output");
    qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);

    irq = IRQ_get_next(opp, &dst->raised);
    DPRINTF("IACK: irq=%d", irq);

    if (irq == -1) {
        /* No more interrupt pending */
        return opp->spve;
    }

    src = &opp->src[irq];
    if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
            !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
        error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x",
                     __func__, irq, dst->ctpr, src->ivpr);
        openpic_update_irq(opp, irq);
        retval = opp->spve;
    } else {
        /* IRQ enters the servicing state */
        IRQ_setbit(&dst->servicing, irq);
        retval = IVPR_VECTOR(opp, src->ivpr);
    }

    if (!src->level) {
        /* edge-sensitive IRQ */
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
        src->pending = 0;
        IRQ_resetbit(&dst->raised, irq);
    }

    /* Timers and IPIs support multicast. */
    if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
        ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
        DPRINTF("irq is IPI or TMR");
        src->destmask &= ~(1 << cpu);
        if (src->destmask && !src->level) {
            /* trigger on CPUs that didn't know about it yet */
            openpic_set_irq(opp, irq, 1);
            openpic_set_irq(opp, irq, 0);
            /* if all CPUs knew about it, set active bit again */
            src->ivpr |= IVPR_ACTIVITY_MASK;
        }
    }

    return retval;
}

static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx)
{
    OpenPICState *opp = opaque;
    IRQDest *dst;
    uint32_t retval;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr);
    retval = 0xFFFFFFFF;

    if (idx < 0 || idx >= opp->nb_cpus) {
        return retval;
    }

    if (addr & 0xF) {
        return retval;
    }
    dst = &opp->dst[idx];
    addr &= 0xFF0;
    switch (addr) {
    case 0x80: /* CTPR */
        retval = dst->ctpr;
        break;
    case 0x90: /* WHOAMI */
        retval = idx;
        break;
    case 0xA0: /* IACK */
        retval = openpic_iack(opp, dst, idx);
        break;
    case 0xB0: /* EOI */
        retval = 0;
        break;
    default:
        break;
    }
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}

static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len)
{
    return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12);
}

static const MemoryRegionOps openpic_glb_ops_le = {
    .write = openpic_gbl_write,
    .read  = openpic_gbl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_glb_ops_be = {
    .write = openpic_gbl_write,
    .read  = openpic_gbl_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_le = {
    .write = openpic_tmr_write,
    .read  = openpic_tmr_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_be = {
    .write = openpic_tmr_write,
    .read  = openpic_tmr_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_le = {
    .write = openpic_cpu_write,
    .read  = openpic_cpu_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_be = {
    .write = openpic_cpu_write,
    .read  = openpic_cpu_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_src_ops_le = {
    .write = openpic_src_write,
    .read  = openpic_src_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_src_ops_be = {
    .write = openpic_src_write,
    .read  = openpic_src_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_msi_ops_be = {
    .read  = openpic_msi_read,
    .write = openpic_msi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_summary_ops_be = {
    .read  = openpic_summary_read,
    .write = openpic_summary_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void openpic_reset(DeviceState *d)
{
    OpenPICState *opp = OPENPIC(d);
    int i;

    opp->gcr = GCR_RESET;
    /* Initialise controller registers */
    opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
               ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) |
               (opp->vid << FRR_VID_SHIFT);

    opp->pir = 0;
    opp->spve = -1 & opp->vector_mask;
    opp->tfrr = opp->tfrr_reset;
    /* Initialise IRQ sources */
    for (i = 0; i < opp->max_irq; i++) {
        opp->src[i].ivpr = opp->ivpr_reset;
        switch (opp->src[i].type) {
        case IRQ_TYPE_NORMAL:
            opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK);
            break;

        case IRQ_TYPE_FSLINT:
            opp->src[i].ivpr |= IVPR_POLARITY_MASK;
            break;

        case IRQ_TYPE_FSLSPECIAL:
            break;
        }

        /* Mask all IPI interrupts for Freescale OpenPIC */
        if ((opp->model == OPENPIC_MODEL_FSL_MPIC_20) ||
            (opp->model == OPENPIC_MODEL_FSL_MPIC_42)) {
            if (i >= opp->irq_ipi0 && i < opp->irq_tim0) {
                write_IRQreg_idr(opp, i, 0);
                continue;
            }
        }

        write_IRQreg_idr(opp, i, opp->idr_reset);
    }
    /* Initialise IRQ destinations */
    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].ctpr = 15;
        opp->dst[i].raised.next = -1;
        opp->dst[i].raised.priority = 0;
        bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.next = -1;
        opp->dst[i].servicing.priority = 0;
        bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS);
    }
    /* Initialise timers */
    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].tccr = 0;
        opp->timers[i].tbcr = TBCR_CI;
        if (opp->timers[i].qemu_timer_active) {
            timer_del(opp->timers[i].qemu_timer); /* Inhibit timer */
            opp->timers[i].qemu_timer_active = false;
        }
    }
    /* Go out of RESET state */
    opp->gcr = 0;
}

typedef struct MemReg {
    const char             *name;
    MemoryRegionOps const  *ops;
    hwaddr                  start_addr;
    ram_addr_t              size;
} MemReg;

static void fsl_common_init(OpenPICState *opp)
{
    int i;
    int virq = OPENPIC_MAX_SRC;

    opp->vid = VID_REVISION_1_2;
    opp->vir = VIR_GENERIC;
    opp->vector_mask = 0xFFFF;
    opp->tfrr_reset = 0;
    opp->ivpr_reset = IVPR_MASK_MASK;
    opp->idr_reset = 1 << 0;
    opp->max_irq = OPENPIC_MAX_IRQ;

    opp->irq_ipi0 = virq;
    virq += OPENPIC_MAX_IPI;
    opp->irq_tim0 = virq;
    virq += OPENPIC_MAX_TMR;

    assert(virq <= OPENPIC_MAX_IRQ);

    opp->irq_msi = 224;

    msi_nonbroken = true;
    for (i = 0; i < opp->fsl->max_ext; i++) {
        opp->src[i].level = false;
    }

    /* Internal interrupts, including message and MSI */
    for (i = 16; i < OPENPIC_MAX_SRC; i++) {
        opp->src[i].type = IRQ_TYPE_FSLINT;
        opp->src[i].level = true;
    }

    /* timers and IPIs */
    for (i = OPENPIC_MAX_SRC; i < virq; i++) {
        opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
        opp->src[i].level = false;
    }

    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].n_IRQ = opp->irq_tim0 + i;
        opp->timers[i].qemu_timer_active = false;
        opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                 &qemu_timer_cb,
                                                 &opp->timers[i]);
        opp->timers[i].opp = opp;
    }
}

static void map_list(OpenPICState *opp, const MemReg *list, int *count)
{
    while (list->name) {
        assert(*count < ARRAY_SIZE(opp->sub_io_mem));

        memory_region_init_io(&opp->sub_io_mem[*count], OBJECT(opp), list->ops,
                              opp, list->name, list->size);

        memory_region_add_subregion(&opp->mem, list->start_addr,
                                    &opp->sub_io_mem[*count]);

        (*count)++;
        list++;
    }
}

static const VMStateDescription vmstate_openpic_irq_queue = {
    .name = "openpic_irq_queue",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size),
        VMSTATE_INT32(next, IRQQueue),
        VMSTATE_INT32(priority, IRQQueue),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_openpic_irqdest = {
    .name = "openpic_irqdest",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(ctpr, IRQDest),
        VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_openpic_irqsource = {
    .name = "openpic_irqsource",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(ivpr, IRQSource),
        VMSTATE_UINT32(idr, IRQSource),
        VMSTATE_UINT32(destmask, IRQSource),
        VMSTATE_INT32(last_cpu, IRQSource),
        VMSTATE_INT32(pending, IRQSource),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_openpic_timer = {
    .name = "openpic_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(tccr, OpenPICTimer),
        VMSTATE_UINT32(tbcr, OpenPICTimer),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_openpic_msi = {
    .name = "openpic_msi",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(msir, OpenPICMSI),
        VMSTATE_END_OF_LIST()
    },
};

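/*
 * After migration, the saved IDR and IVPR values are replayed through the
 * normal register-write paths so that per-source derived state (output
 * routing, nomask, level sensitivity) is recomputed on the destination.
 */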
static int openpic_post_load(void *opaque, int version_id)
{
    OpenPICState *opp = (OpenPICState *)opaque;
    int i;

    /* Update internal ivpr and idr variables */
    for (i = 0; i < opp->max_irq; i++) {
        write_IRQreg_idr(opp, i, opp->src[i].idr);
        write_IRQreg_ivpr(opp, i, opp->src[i].ivpr);
    }

    return 0;
}

static const VMStateDescription vmstate_openpic = {
    .name = "openpic",
    .version_id = 3,
    .minimum_version_id = 3,
    .post_load = openpic_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(gcr, OpenPICState),
        VMSTATE_UINT32(vir, OpenPICState),
        VMSTATE_UINT32(pir, OpenPICState),
        VMSTATE_UINT32(spve, OpenPICState),
        VMSTATE_UINT32(tfrr, OpenPICState),
        VMSTATE_UINT32(max_irq, OpenPICState),
        VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0,
                                     vmstate_openpic_irqsource, IRQSource),
        VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0,
                                     vmstate_openpic_irqdest, IRQDest),
        VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0,
                             vmstate_openpic_timer, OpenPICTimer),
        VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0,
                             vmstate_openpic_msi, OpenPICMSI),
        VMSTATE_UINT32(irq_ipi0, OpenPICState),
        VMSTATE_UINT32(irq_tim0, OpenPICState),
        VMSTATE_UINT32(irq_msi, OpenPICState),
        VMSTATE_END_OF_LIST()
    },
};

static void openpic_init(Object *obj)
{
    OpenPICState *opp = OPENPIC(obj);

    memory_region_init(&opp->mem, obj, "openpic", 0x40000);
}

static void openpic_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *d = SYS_BUS_DEVICE(dev);
    OpenPICState *opp = OPENPIC(dev);
    int i, j;
    int list_count = 0;
    static const MemReg list_le[] = {
        {"glb", &openpic_glb_ops_le,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_le,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_le,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_le,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    static const MemReg list_be[] = {
        {"glb", &openpic_glb_ops_be,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_be,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_be,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_be,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    static const MemReg list_fsl[] = {
        {"msi", &openpic_msi_ops_be,
         OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
        {"summary", &openpic_summary_ops_be,
         OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
        {NULL}
    };

    if (opp->nb_cpus > MAX_CPU) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
                   TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
                   (uint64_t)0, (uint64_t)MAX_CPU);
        return;
    }

    switch (opp->model) {
    case OPENPIC_MODEL_FSL_MPIC_20:
    default:
        opp->fsl = &fsl_mpic_20;
        opp->brr1 = 0x00400200;
        opp->flags |= OPENPIC_FLAG_IDR_CRIT;
        opp->nb_irqs = 80;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_FSL_MPIC_42:
        opp->fsl = &fsl_mpic_42;
        opp->brr1 = 0x00400402;
        opp->flags |= OPENPIC_FLAG_ILR;
        opp->nb_irqs = 196;
        opp->mpic_mode_mask = GCR_MODE_PROXY;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_KEYLARGO:
        opp->nb_irqs = KEYLARGO_MAX_EXT;
        opp->vid = VID_REVISION_1_2;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = KEYLARGO_MAX_IRQ;
        opp->irq_ipi0 = KEYLARGO_IPI_IRQ;
        opp->irq_tim0 = KEYLARGO_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }

        map_list(opp, list_le, &list_count);
        break;
    }

    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB);
        for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
            sysbus_init_irq(d, &opp->dst[i].irqs[j]);
        }

        opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
    }

    sysbus_init_mmio(d, &opp->mem);
    qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
}

static Property openpic_properties[] = {
    DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20),
    DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void openpic_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = openpic_realize;
    device_class_set_props(dc, openpic_properties);
    dc->reset = openpic_reset;
    dc->vmsd = &vmstate_openpic;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo openpic_info = {
    .name          = TYPE_OPENPIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(OpenPICState),
    .instance_init = openpic_init,
    .class_init    = openpic_class_init,
};

static void openpic_register_types(void)
{
    type_register_static(&openpic_info);
}

type_init(openpic_register_types)