/*
 * OpenPIC emulation
 *
 * Copyright (c) 2004 Jocelyn Mayer
 *               2011 Alexander Graf
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Based on OpenPic implementations:
 * - Intel GW80314 I/O companion chip developer's manual
 * - Motorola MPC8245 & MPC8540 user manuals.
 * - Motorola MCP750 (aka Raven) programmer manual.
 * - Motorola Harrier programmer manual
 *
 * Serial interrupts, as implemented in the Raven chipset, are not supported yet.
 *
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/pci/pci.h"
#include "hw/ppc/openpic.h"
#include "hw/ppc/ppc_e500.h"
#include "hw/sysbus.h"
#include "hw/pci/msi.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "qapi/qmp/qerror.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
//#define DEBUG_OPENPIC

#ifdef DEBUG_OPENPIC
static const int debug_openpic = 1;
#else
static const int debug_openpic = 0;
#endif

static int get_current_cpu(void);
#define DPRINTF(fmt, ...) do { \
        if (debug_openpic) { \
            info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \
        } \
    } while (0)
#define MAX_CPU     32
#define MAX_MSI     8
#define VID         0x03 /* MPIC version ID */

/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT     (1 << 0)
#define OPENPIC_FLAG_ILR          (2 << 0)

/* OpenPIC address map */
#define OPENPIC_GLB_REG_START        0x0
#define OPENPIC_GLB_REG_SIZE         0x10F0
#define OPENPIC_TMR_REG_START        0x10F0
#define OPENPIC_TMR_REG_SIZE         0x220
#define OPENPIC_MSI_REG_START        0x1600
#define OPENPIC_MSI_REG_SIZE         0x200
#define OPENPIC_SUMMARY_REG_START    0x3800
#define OPENPIC_SUMMARY_REG_SIZE     0x800
#define OPENPIC_SRC_REG_START        0x10000
#define OPENPIC_SRC_REG_SIZE         (OPENPIC_MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START        0x20000
#define OPENPIC_CPU_REG_SIZE         0x100 + ((MAX_CPU - 1) * 0x1000)

/* Raven */
#define RAVEN_MAX_CPU      2
#define RAVEN_MAX_EXT     48
#define RAVEN_MAX_IRQ     64
#define RAVEN_MAX_TMR      OPENPIC_MAX_TMR
#define RAVEN_MAX_IPI      OPENPIC_MAX_IPI

/* KeyLargo */
#define KEYLARGO_MAX_CPU   4
#define KEYLARGO_MAX_EXT  64
#define KEYLARGO_MAX_IPI   4
#define KEYLARGO_MAX_IRQ  (64 + KEYLARGO_MAX_IPI)
#define KEYLARGO_MAX_TMR   0
#define KEYLARGO_IPI_IRQ  (KEYLARGO_MAX_EXT) /* First IPI IRQ */
/* Timers don't exist but this makes the code happy... */
#define KEYLARGO_TMR_IRQ  (KEYLARGO_IPI_IRQ + KEYLARGO_MAX_IPI)

/* Interrupt definitions */
#define RAVEN_FE_IRQ     (RAVEN_MAX_EXT)     /* Internal functional IRQ */
#define RAVEN_ERR_IRQ    (RAVEN_MAX_EXT + 1) /* Error IRQ */
#define RAVEN_TMR_IRQ    (RAVEN_MAX_EXT + 2) /* First timer IRQ */
#define RAVEN_IPI_IRQ    (RAVEN_TMR_IRQ + RAVEN_MAX_TMR) /* First IPI IRQ */
/* First doorbell IRQ */
#define RAVEN_DBL_IRQ    (RAVEN_IPI_IRQ + (RAVEN_MAX_CPU * RAVEN_MAX_IPI))
typedef struct FslMpicInfo {
    int max_ext;
} FslMpicInfo;

static FslMpicInfo fsl_mpic_20 = {
    .max_ext = 12,
};

static FslMpicInfo fsl_mpic_42 = {
    .max_ext = 12,
};
#define FRR_NIRQ_SHIFT    16
#define FRR_NCPU_SHIFT     8
#define FRR_VID_SHIFT      0

#define VID_REVISION_1_2   2
#define VID_REVISION_1_3   3

#define VIR_GENERIC      0x00000000 /* Generic Vendor ID */
#define VIR_MPIC2A       0x00004614 /* IBM MPIC-2A */

#define GCR_RESET        0x80000000
#define GCR_MODE_PASS    0x00000000
#define GCR_MODE_MIXED   0x20000000
#define GCR_MODE_PROXY   0x60000000

#define TBCR_CI          0x80000000 /* count inhibit */
#define TCCR_TOG         0x80000000 /* toggles when decrement to zero */

#define IDR_EP_SHIFT     31
#define IDR_EP_MASK      (1U << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT    30
#define IDR_CI1_SHIFT    29
#define IDR_P1_SHIFT      1
#define IDR_P0_SHIFT      0

#define ILR_INTTGT_MASK  0x000000ff
#define ILR_INTTGT_INT   0x00
#define ILR_INTTGT_CINT  0x01 /* critical */
#define ILR_INTTGT_MCP   0x02 /* machine check */
/* The currently supported INTTGT values happen to be the same as QEMU's
 * openpic output codes, but don't depend on this.  The output codes
 * could change (unlikely, but...) or support could be added for
 * more INTTGT values.
 */
static const int inttgt_output[][2] = {
    { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT },
    { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT },
    { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK },
};
static int inttgt_to_output(int inttgt)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][0] == inttgt) {
            return inttgt_output[i][1];
        }
    }

    error_report("%s: unsupported inttgt %d", __func__, inttgt);
    return OPENPIC_OUTPUT_INT;
}
static int output_to_inttgt(int output)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][1] == output) {
            return inttgt_output[i][0];
        }
    }

    abort();
}
#define MSIIR_OFFSET       0x140
#define MSIIR_SRS_SHIFT    29
#define MSIIR_SRS_MASK     (0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT    24
#define MSIIR_IBS_MASK     (0x1f << MSIIR_IBS_SHIFT)
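
/* Returns the index of the CPU performing the current access, or -1 when
 * there is no CPU context (e.g. during device reset or migration). */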
static int get_current_cpu(void)
{
    if (!current_cpu) {
        return -1;
    }

    return current_cpu->cpu_index;
}
static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx);
static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx);
static void openpic_reset(DeviceState *d);

typedef enum IRQType {
    IRQ_TYPE_NORMAL = 0,
    IRQ_TYPE_FSLINT,        /* FSL internal interrupt -- level only */
    IRQ_TYPE_FSLSPECIAL,    /* FSL timer/IPI interrupt, edge, no polarity */
} IRQType;
/* Round up to the nearest 64 IRQs so that the queue length
 * won't change when moving between 32 and 64 bit hosts.
 */
#define IRQQUEUE_SIZE_BITS ((OPENPIC_MAX_IRQ + 63) & ~63)

typedef struct IRQQueue {
    unsigned long *queue;
    int32_t queue_size; /* Only used for VMSTATE_BITMAP */
    int next;
    int priority;
} IRQQueue;
typedef struct IRQSource {
    uint32_t ivpr;      /* IRQ vector/priority register */
    uint32_t idr;       /* IRQ destination register */
    uint32_t destmask;  /* bitmap of CPU destinations */
    int last_cpu;
    int output;         /* IRQ level, e.g. OPENPIC_OUTPUT_INT */
    int pending;        /* TRUE if IRQ is pending */
    IRQType type;
    bool level:1;       /* level-triggered */
    bool nomask:1;      /* critical interrupts ignore mask on some FSL MPICs */
} IRQSource;
#define IVPR_MASK_SHIFT       31
#define IVPR_MASK_MASK        (1U << IVPR_MASK_SHIFT)
#define IVPR_ACTIVITY_SHIFT   30
#define IVPR_ACTIVITY_MASK    (1U << IVPR_ACTIVITY_SHIFT)
#define IVPR_MODE_SHIFT       29
#define IVPR_MODE_MASK        (1U << IVPR_MODE_SHIFT)
#define IVPR_POLARITY_SHIFT   23
#define IVPR_POLARITY_MASK    (1U << IVPR_POLARITY_SHIFT)
#define IVPR_SENSE_SHIFT      22
#define IVPR_SENSE_MASK       (1U << IVPR_SENSE_SHIFT)

#define IVPR_PRIORITY_MASK    (0xFU << 16)
#define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
#define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask)

/* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
#define IDR_EP      0x80000000  /* external pin */
#define IDR_CI      0x40000000  /* critical interrupt */
/* Convert between openpic clock ticks and nanosecs.  In the hardware the clock
   frequency is driven by board inputs to the PIC which the PIC would then
   divide by 4 or 8.  For now hard code to 25 MHz.
*/
#define OPENPIC_TIMER_FREQ_MHZ  25
#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ)
static inline uint64_t ns_to_ticks(uint64_t ns)
{
    return ns / OPENPIC_TIMER_NS_PER_TICK;
}
static inline uint64_t ticks_to_ns(uint64_t ticks)
{
    return ticks * OPENPIC_TIMER_NS_PER_TICK;
}
typedef struct OpenPICTimer {
    uint32_t tccr;  /* Global timer current count register */
    uint32_t tbcr;  /* Global timer base count register */
    int n_IRQ;
    bool qemu_timer_active;       /* Is the qemu_timer running? */
    struct QEMUTimer *qemu_timer;
    struct OpenPICState *opp;     /* Device timer is part of. */
    /* The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last
       current_count written or read, only defined if qemu_timer_active. */
    uint64_t origin_time;
} OpenPICTimer;

typedef struct OpenPICMSI {
    uint32_t msir;   /* Shared Message Signaled Interrupt Register */
} OpenPICMSI;

typedef struct IRQDest {
    int32_t ctpr; /* CPU current task priority */
    IRQQueue raised;
    IRQQueue servicing;
    qemu_irq *irqs;

    /* Count of IRQ sources asserting on non-INT outputs */
    uint32_t outputs_active[OPENPIC_OUTPUT_NB];
} IRQDest;
#define OPENPIC(obj) OBJECT_CHECK(OpenPICState, (obj), TYPE_OPENPIC)

typedef struct OpenPICState {
    /*< private >*/
    SysBusDevice parent_obj;
    /*< public >*/

    MemoryRegion mem;

    /* Behavior control */
    FslMpicInfo *fsl;
    uint32_t model;
    uint32_t flags;
    uint32_t nb_irqs;
    uint32_t vid;
    uint32_t vir; /* Vendor identification register */
    uint32_t vector_mask;
    uint32_t tfrr_reset;
    uint32_t ivpr_reset;
    uint32_t idr_reset;
    uint32_t brr1;
    uint32_t mpic_mode_mask;

    /* Sub-regions */
    MemoryRegion sub_io_mem[6];

    /* Global registers */
    uint32_t frr;  /* Feature reporting register */
    uint32_t gcr;  /* Global configuration register */
    uint32_t pir;  /* Processor initialization register */
    uint32_t spve; /* Spurious vector register */
    uint32_t tfrr; /* Timer frequency reporting register */
    /* Source registers */
    IRQSource src[OPENPIC_MAX_IRQ];
    /* Local registers per output pin */
    IRQDest dst[MAX_CPU];
    uint32_t nb_cpus;
    /* Timer registers */
    OpenPICTimer timers[OPENPIC_MAX_TMR];
    uint32_t max_tmr;

    /* Shared MSI registers */
    OpenPICMSI msi[MAX_MSI];
    uint32_t max_irq;
    uint32_t irq_ipi0;
    uint32_t irq_tim0;
    uint32_t irq_msi;
} OpenPICState;
static inline void IRQ_setbit(IRQQueue *q, int n_IRQ)
{
    set_bit(n_IRQ, q->queue);
}

static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ)
{
    clear_bit(n_IRQ, q->queue);
}
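
/* Scan the queue bitmap and cache the highest-priority pending IRQ in
 * q->next (or -1 if the queue is empty) along with its priority. */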
static void IRQ_check(OpenPICState *opp, IRQQueue *q)
{
    int irq = -1;
    int next = -1;
    int priority = -1;

    for (;;) {
        irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
        if (irq == opp->max_irq) {
            break;
        }

        DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d",
                irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

        if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
            next = irq;
            priority = IVPR_PRIORITY(opp->src[irq].ivpr);
        }
    }

    q->next = next;
    q->priority = priority;
}
static int IRQ_get_next(OpenPICState *opp, IRQQueue *q)
{
    /* XXX: optimize */
    IRQ_check(opp, q);

    return q->next;
}
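
/* Propagate a single source's state change to one destination CPU: non-INT
 * outputs (critical/machine check) are raised or lowered directly, while
 * normal interrupts go through the raised queue and the ctpr/servicing
 * priority checks before the INT output is asserted or withdrawn. */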
static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
                           bool active, bool was_active)
{
    IRQDest *dst;
    IRQSource *src;
    int priority;

    dst = &opp->dst[n_CPU];
    src = &opp->src[n_IRQ];

    DPRINTF("%s: IRQ %d active %d was %d",
            __func__, n_IRQ, active, was_active);

    if (src->output != OPENPIC_OUTPUT_INT) {
        DPRINTF("%s: output %d irq %d active %d was %d count %d",
                __func__, src->output, n_IRQ, active, was_active,
                dst->outputs_active[src->output]);

        /* On Freescale MPIC, critical interrupts ignore priority,
         * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
         * masking.
         */
        if (active) {
            if (!was_active && dst->outputs_active[src->output]++ == 0) {
                DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_raise(dst->irqs[src->output]);
            }
        } else {
            if (was_active && --dst->outputs_active[src->output] == 0) {
                DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_lower(dst->irqs[src->output]);
            }
        }

        return;
    }

    priority = IVPR_PRIORITY(src->ivpr);

    /* Even if the interrupt doesn't have enough priority,
     * it is still raised, in case ctpr is lowered later.
     */
    if (active) {
        IRQ_setbit(&dst->raised, n_IRQ);
    } else {
        IRQ_resetbit(&dst->raised, n_IRQ);
    }

    IRQ_check(opp, &dst->raised);

    if (active && priority <= dst->ctpr) {
        DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d",
                __func__, n_IRQ, priority, dst->ctpr, n_CPU);
        active = 0;
    }

    if (active) {
        if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
                priority <= dst->servicing.priority) {
            DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d",
                    __func__, n_IRQ, dst->servicing.next, n_CPU);
        } else {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d",
                    __func__, n_CPU, n_IRQ, dst->raised.next);
            qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    } else {
        IRQ_get_next(opp, &dst->servicing);
        if (dst->raised.priority > dst->ctpr &&
                dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d",
                    __func__, n_IRQ, dst->raised.next, dst->raised.priority,
                    dst->ctpr, dst->servicing.priority, n_CPU);
            /* IRQ line stays asserted */
        } else {
            DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d",
                    __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU);
            qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    }
}
/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
{
    IRQSource *src;
    bool active, was_active;
    int i;

    src = &opp->src[n_IRQ];
    active = src->pending;

    if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
        /* Interrupt source is disabled */
        DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ);
        active = false;
    }

    was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

    /*
     * We don't have a similar check for already-active because
     * ctpr may have changed and we need to withdraw the interrupt.
     */
    if (!active && !was_active) {
        DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ);
        return;
    }

    if (active) {
        src->ivpr |= IVPR_ACTIVITY_MASK;
    } else {
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
    }

    if (src->destmask == 0) {
        /* No target */
        DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ);
        return;
    }

    if (src->destmask == (1 << src->last_cpu)) {
        /* Only one CPU is allowed to receive this IRQ */
        IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
    } else if (!(src->ivpr & IVPR_MODE_MASK)) {
        /* Directed delivery mode */
        for (i = 0; i < opp->nb_cpus; i++) {
            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
            }
        }
    } else {
        /* Distributed delivery mode */
        for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
            if (i == opp->nb_cpus) {
                i = 0;
            }

            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
                src->last_cpu = i;
                break;
            }
        }
    }
}
static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
    OpenPICState *opp = opaque;
    IRQSource *src;

    if (n_IRQ >= OPENPIC_MAX_IRQ) {
        error_report("%s: IRQ %d out of range", __func__, n_IRQ);
        abort();
    }

    src = &opp->src[n_IRQ];
    DPRINTF("openpic: set irq %d = %d ivpr=0x%08x",
            n_IRQ, level, src->ivpr);
    if (src->level) {
        /* level-sensitive irq */
        src->pending = level;
        openpic_update_irq(opp, n_IRQ);
    } else {
        /* edge-sensitive irq */
        if (level) {
            src->pending = 1;
            openpic_update_irq(opp, n_IRQ);
        }

        if (src->output != OPENPIC_OUTPUT_INT) {
            /* Edge-triggered interrupts shouldn't be used
             * with non-INT delivery, but just in case,
             * try to make it do something sane rather than
             * cause an interrupt storm.  This is close to
             * what you'd probably see happen in real hardware.
             */
            src->pending = 0;
            openpic_update_irq(opp, n_IRQ);
        }
    }
}
static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].idr;
}

static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ)
{
    if (opp->flags & OPENPIC_FLAG_ILR) {
        return output_to_inttgt(opp->src[n_IRQ].output);
    }

    return 0xffffffff;
}

static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].ivpr;
}
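
/* IDR write: mask off unimplemented destination bits and recompute the
 * source's output type (INT vs. critical on FSL MPICs) and destmask. */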
static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    IRQSource *src = &opp->src[n_IRQ];
    uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
    uint32_t crit_mask = 0;
    uint32_t mask = normal_mask;
    int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
    int i;

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        crit_mask = mask << crit_shift;
        mask |= crit_mask | IDR_EP;
    }

    src->idr = val & mask;
    DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr);

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        if (src->idr & crit_mask) {
            if (src->idr & normal_mask) {
                DPRINTF("%s: IRQ configured for multiple output types, using "
                        "critical", __func__);
            }

            src->output = OPENPIC_OUTPUT_CINT;
            src->nomask = true;
            src->destmask = 0;

            for (i = 0; i < opp->nb_cpus; i++) {
                int n_ci = IDR_CI0_SHIFT - i;

                if (src->idr & (1UL << n_ci)) {
                    src->destmask |= 1UL << i;
                }
            }
        } else {
            src->output = OPENPIC_OUTPUT_INT;
            src->nomask = false;
            src->destmask = src->idr & normal_mask;
        }
    } else {
        src->destmask = src->idr;
    }
}
static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    if (opp->flags & OPENPIC_FLAG_ILR) {
        IRQSource *src = &opp->src[n_IRQ];

        src->output = inttgt_to_output(val & ILR_INTTGT_MASK);
        DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, src->idr,
                src->output);

        /* TODO: on MPIC v4.0 only, set nomask for non-INT */
    }
}
static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    uint32_t mask;

    /* NOTE when implementing newer FSL MPIC models: starting with v4.0,
     * the polarity bit is read-only on internal interrupts.
     */
    mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
           IVPR_POLARITY_MASK | opp->vector_mask;

    /* ACTIVITY bit is read-only */
    opp->src[n_IRQ].ivpr =
        (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

    /* For FSL internal interrupts, the sense bit is reserved and zero,
     * and the interrupt is always level-triggered.  Timers and IPIs
     * have no sense or polarity bits, and are edge-triggered.
     */
    switch (opp->src[n_IRQ].type) {
    case IRQ_TYPE_NORMAL:
        opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
        break;

    case IRQ_TYPE_FSLINT:
        opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
        break;

    case IRQ_TYPE_FSLSPECIAL:
        opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
        break;
    }

    openpic_update_irq(opp, n_IRQ);
    DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val,
            opp->src[n_IRQ].ivpr);
}
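
/* GCR write: GCR_RESET resets the whole controller; otherwise only the
 * MPIC mode bits are updated and external proxy mode is propagated to
 * the e500 cores. */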
static void openpic_gcr_write(OpenPICState *opp, uint64_t val)
{
    bool mpic_proxy = false;

    if (val & GCR_RESET) {
        openpic_reset(DEVICE(opp));
        return;
    }

    opp->gcr &= ~opp->mpic_mode_mask;
    opp->gcr |= val & opp->mpic_mode_mask;

    /* Set external proxy mode */
    if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY) {
        mpic_proxy = true;
    }

    ppce500_set_mpic_proxy(mpic_proxy);
}
static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    IRQDest *dst;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);
    if (addr & 0xF) {
        return;
    }
    switch (addr) {
    case 0x00: /* Block Revision Register1 (BRR1) is read-only */
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        openpic_cpu_write_internal(opp, addr, val, get_current_cpu());
        break;
    case 0x1000: /* FRR */
        break;
    case 0x1020: /* GCR */
        openpic_gcr_write(opp, val);
        break;
    case 0x1080: /* VIR */
        break;
    case 0x1090: /* PIR */
        for (idx = 0; idx < opp->nb_cpus; idx++) {
            if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) {
                DPRINTF("Raise OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
            } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) {
                DPRINTF("Lower OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
            }
        }
        opp->pir = val;
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        {
            int idx;
            idx = (addr - 0x10A0) >> 4;
            write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
        }
        break;
    case 0x10E0: /* SPVE */
        opp->spve = val & opp->vector_mask;
        break;
    default:
        break;
    }
}
static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    retval = 0xFFFFFFFF;
    if (addr & 0xF) {
        return retval;
    }
    switch (addr) {
    case 0x1000: /* FRR */
        retval = opp->frr;
        break;
    case 0x1020: /* GCR */
        retval = opp->gcr;
        break;
    case 0x1080: /* VIR */
        retval = opp->vir;
        break;
    case 0x1090: /* PIR */
        retval = 0x00000000;
        break;
    case 0x00: /* Block Revision Register1 (BRR1) */
        retval = opp->brr1;
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        retval = openpic_cpu_read_internal(opp, addr, get_current_cpu());
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        {
            int idx;
            idx = (addr - 0x10A0) >> 4;
            retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
        }
        break;
    case 0x10E0: /* SPVE */
        retval = opp->spve;
        break;
    default:
        break;
    }
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled);
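
/* Timer expiry callback: reload the current count from the base count,
 * flip the toggle bit, re-arm the QEMU timer and pulse the timer IRQ. */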
static void qemu_timer_cb(void *opaque)
{
    OpenPICTimer *tmr = opaque;
    OpenPICState *opp = tmr->opp;
    uint32_t n_IRQ = tmr->n_IRQ;
    uint32_t val = tmr->tbcr & ~TBCR_CI;
    uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG);  /* invert toggle. */

    DPRINTF("%s n_IRQ=%d", __func__, n_IRQ);
    /* Reload current count from base count and setup timer. */
    tmr->tccr = val | tog;
    openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
    /* Raise the interrupt. */
    opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ);
    openpic_set_irq(opp, n_IRQ, 1);
    openpic_set_irq(opp, n_IRQ, 0);
}
/* If enabled is true, arranges for an interrupt to be raised val clocks into
   the future; if enabled is false, cancels the timer. */
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled)
{
    uint64_t ns = ticks_to_ns(val & ~TCCR_TOG);
    /* A count of zero causes a timer to be set to expire immediately.  This
       effectively stops the simulation since the timer is constantly expiring
       which prevents guest code execution, so we don't honor that
       configuration.  On real hardware, this situation would generate an
       interrupt on every clock cycle if the interrupt was unmasked. */
    if ((ns == 0) || !enabled) {
        tmr->qemu_timer_active = false;
        tmr->tccr = tmr->tccr & TCCR_TOG;
        timer_del(tmr->qemu_timer);           /* set timer to never expire. */
    } else {
        tmr->qemu_timer_active = true;
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        tmr->origin_time = now;
        timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */
    }
}
/* Returns the current tccr value, i.e., timer value (in clocks) with
   appropriate TOG. */
static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr)
{
    uint64_t retval;
    if (!tmr->qemu_timer_active) {
        retval = tmr->tccr;
    } else {
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        uint64_t used = now - tmr->origin_time;  /* nsecs */
        uint32_t used_ticks = (uint32_t)ns_to_ticks(used);
        uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks;
        retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG));
    }
    return retval;
}
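
/* Timer register block: TFRR at offset 0, then one group of four registers
 * (TCCR, TBCR, TVPR, TDR at 0x00/0x10/0x20/0x30) per timer, with the groups
 * spaced 0x40 apart starting at offset 0x10. */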
static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, (addr + 0x10f0), val);
    if (addr & 0xF) {
        return;
    }

    if (addr == 0) {
        /* TFRR */
        opp->tfrr = val;
        return;
    }
    addr -= 0x10;  /* correct for TFRR */
    idx = (addr >> 6) & 0x3;

    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        break;
    case 0x10: /* TBCR */
        /* Did the enable status change? */
        if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) {
            /* Did "Count Inhibit" transition from 1 to 0? */
            if ((val & TBCR_CI) == 0) {
                opp->timers[idx].tccr = val & ~TCCR_TOG;
            }
            openpic_tmr_set_tmr(&opp->timers[idx],
                                (val & ~TBCR_CI),
                                /*enabled=*/((val & TBCR_CI) == 0));
        }
        opp->timers[idx].tbcr = val;
        break;
    case 0x20: /* TVPR */
        write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
        break;
    case 0x30: /* TDR */
        write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
        break;
    }
}
static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval = -1;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0);
    if (addr & 0xF) {
        goto out;
    }
    if (addr == 0) {
        /* TFRR */
        retval = opp->tfrr;
        goto out;
    }
    addr -= 0x10;  /* correct for TFRR */
    idx = (addr >> 6) & 0x3;
    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        retval = openpic_tmr_get_timer(&opp->timers[idx]);
        break;
    case 0x10: /* TBCR */
        retval = opp->timers[idx].tbcr;
        break;
    case 0x20: /* TVPR */
        retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
        break;
    case 0x30: /* TDR */
        retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
        break;
    }

out:
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}
static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);

    addr = addr & 0xffff;
    idx = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        write_IRQreg_ivpr(opp, idx, val);
        break;
    case 0x10:
        write_IRQreg_idr(opp, idx, val);
        break;
    case 0x18:
        write_IRQreg_ilr(opp, idx, val);
        break;
    }
}
static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    retval = 0xFFFFFFFF;

    addr = addr & 0xffff;
    idx = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        retval = read_IRQreg_ivpr(opp, idx);
        break;
    case 0x10:
        retval = read_IRQreg_idr(opp, idx);
        break;
    case 0x18:
        retval = read_IRQreg_ilr(opp, idx);
        break;
    }

    DPRINTF("%s: => 0x%08x", __func__, retval);
    return retval;
}
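
/* MSIIR write: SRS selects the shared MSI register, IBS the bit to set in
 * it; setting the bit raises the corresponding shared MSI interrupt. */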
static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned size)
{
    OpenPICState *opp = opaque;
    int idx = opp->irq_msi;
    int srs, ibs;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);
    if (addr & 0xF) {
        return;
    }

    switch (addr) {
    case MSIIR_OFFSET:
        srs = val >> MSIIR_SRS_SHIFT;
        idx += srs;
        ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
        opp->msi[srs].msir |= 1 << ibs;
        openpic_set_irq(opp, idx, 1);
        break;
    default:
        /* most registers are read-only, thus ignored */
        break;
    }
}
static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    OpenPICState *opp = opaque;
    uint64_t r = 0;
    int i, srs;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    if (addr & 0xF) {
        return -1;
    }

    srs = addr >> 4;

    switch (addr) {
    case 0x00:
    case 0x10:
    case 0x20:
    case 0x30:
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70: /* MSIRs */
        r = opp->msi[srs].msir;
        /* Clear on read */
        opp->msi[srs].msir = 0;
        openpic_set_irq(opp, opp->irq_msi + srs, 0);
        break;
    case 0x120: /* MSISR */
        for (i = 0; i < MAX_MSI; i++) {
            r |= (opp->msi[i].msir ? 1 : 0) << i;
        }
        break;
    }

    return r;
}
static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t r = 0;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);

    /* TODO: EISR/EIMR */

    return r;
}

static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);

    /* TODO: EISR/EIMR */
}
static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx)
{
    OpenPICState *opp = opaque;
    IRQSource *src;
    IRQDest *dst;
    int s_IRQ, n_IRQ;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx,
            addr, val);

    if (idx < 0 || idx >= opp->nb_cpus) {
        return;
    }

    if (addr & 0xF) {
        return;
    }
    dst = &opp->dst[idx];
    addr &= 0xFF0;
    switch (addr) {
    case 0x40: /* IPIDR */
    case 0x50:
    case 0x60:
    case 0x70:
        idx = (addr - 0x40) >> 4;
        /* we use IDE as a mask of which CPUs to still deliver the IPI to. */
        opp->src[opp->irq_ipi0 + idx].destmask |= val;
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
        break;
    case 0x80: /* CTPR */
        dst->ctpr = val & 0x0000000F;

        DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d",
                __func__, idx, dst->ctpr, dst->raised.priority,
                dst->servicing.priority);

        if (dst->raised.priority <= dst->ctpr) {
            DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr",
                    __func__, idx);
            qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
        } else if (dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d",
                    __func__, idx, dst->raised.next);
            qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]);
        }

        break;
    case 0x90: /* WHOAMI */
        /* Read-only register */
        break;
    case 0xA0: /* IACK */
        /* Read-only register */
        break;
    case 0xB0: /* EOI */
        DPRINTF("EOI");
        s_IRQ = IRQ_get_next(opp, &dst->servicing);

        if (s_IRQ < 0) {
            DPRINTF("%s: EOI with no interrupt in service", __func__);
            break;
        }

        IRQ_resetbit(&dst->servicing, s_IRQ);
        /* Set up next servicing IRQ */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);
        /* Check queued interrupts. */
        n_IRQ = IRQ_get_next(opp, &dst->raised);
        src = &opp->src[n_IRQ];
        if (n_IRQ != -1 &&
            (s_IRQ == -1 ||
             IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
            DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
                    idx, n_IRQ);
            qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
        }

        break;
    default:
        break;
    }
}
static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12);
}
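
/* Interrupt acknowledge: return the vector of the highest-priority raised
 * IRQ and move it to the servicing queue, or the spurious vector if nothing
 * valid is pending above the CPU's current task priority. */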
static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
{
    IRQSource *src;
    int retval, irq;

    DPRINTF("Lower OpenPIC INT output");
    qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);

    irq = IRQ_get_next(opp, &dst->raised);
    DPRINTF("IACK: irq=%d", irq);

    if (irq == -1) {
        /* No more interrupt pending */
        return opp->spve;
    }

    src = &opp->src[irq];
    if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
            !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
        error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x",
                     __func__, irq, dst->ctpr, src->ivpr);
        openpic_update_irq(opp, irq);
        retval = opp->spve;
    } else {
        /* IRQ enter servicing state */
        IRQ_setbit(&dst->servicing, irq);
        retval = IVPR_VECTOR(opp, src->ivpr);
    }

    if (!src->level) {
        /* edge-sensitive IRQ */
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
        src->pending = 0;
        IRQ_resetbit(&dst->raised, irq);
    }

    /* Timers and IPIs support multicast. */
    if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
        ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
        DPRINTF("irq is IPI or TMR");
        src->destmask &= ~(1 << cpu);
        if (src->destmask && !src->level) {
            /* trigger on CPUs that didn't know about it yet */
            openpic_set_irq(opp, irq, 1);
            openpic_set_irq(opp, irq, 0);
            /* if all CPUs knew about it, set active bit again */
            src->ivpr |= IVPR_ACTIVITY_MASK;
        }
    }

    return retval;
}
static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx)
{
    OpenPICState *opp = opaque;
    IRQDest *dst;
    uint32_t retval;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr);
    retval = 0xFFFFFFFF;

    if (idx < 0 || idx >= opp->nb_cpus) {
        return retval;
    }

    if (addr & 0xF) {
        return retval;
    }
    dst = &opp->dst[idx];
    addr &= 0xFF0;
    switch (addr) {
    case 0x80: /* CTPR */
        retval = dst->ctpr;
        break;
    case 0x90: /* WHOAMI */
        retval = idx;
        break;
    case 0xA0: /* IACK */
        retval = openpic_iack(opp, dst, idx);
        break;
    case 0xB0: /* EOI */
        retval = 0;
        break;
    default:
        break;
    }
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}
static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len)
{
    return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12);
}
static const MemoryRegionOps openpic_glb_ops_le = {
    .write = openpic_gbl_write,
    .read  = openpic_gbl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_glb_ops_be = {
    .write = openpic_gbl_write,
    .read  = openpic_gbl_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_le = {
    .write = openpic_tmr_write,
    .read  = openpic_tmr_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_be = {
    .write = openpic_tmr_write,
    .read  = openpic_tmr_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_le = {
    .write = openpic_cpu_write,
    .read  = openpic_cpu_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_be = {
    .write = openpic_cpu_write,
    .read  = openpic_cpu_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_msi_ops_be = {
    .read = openpic_msi_read,
    .write = openpic_msi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_summary_ops_be = {
    .read = openpic_summary_read,
    .write = openpic_summary_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
static void openpic_reset(DeviceState *d)
{
    OpenPICState *opp = OPENPIC(d);
    int i;

    opp->gcr = GCR_RESET;
    /* Initialise controller registers */
    opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
               ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) |
               (opp->vid << FRR_VID_SHIFT);

    opp->pir = 0;
    opp->spve = -1 & opp->vector_mask;
    opp->tfrr = opp->tfrr_reset;
    /* Initialise IRQ sources */
    for (i = 0; i < opp->max_irq; i++) {
        opp->src[i].ivpr = opp->ivpr_reset;
        switch (opp->src[i].type) {
        case IRQ_TYPE_NORMAL:
            opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK);
            break;

        case IRQ_TYPE_FSLINT:
            opp->src[i].ivpr |= IVPR_POLARITY_MASK;
            break;

        case IRQ_TYPE_FSLSPECIAL:
            break;
        }

        write_IRQreg_idr(opp, i, opp->idr_reset);
    }
    /* Initialise IRQ destinations */
    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].ctpr = 15;
        opp->dst[i].raised.next = -1;
        opp->dst[i].raised.priority = 0;
        bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.next = -1;
        opp->dst[i].servicing.priority = 0;
        bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS);
    }
    /* Initialise timers */
    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].tccr = 0;
        opp->timers[i].tbcr = TBCR_CI;
        if (opp->timers[i].qemu_timer_active) {
            timer_del(opp->timers[i].qemu_timer);  /* Inhibit timer */
            opp->timers[i].qemu_timer_active = false;
        }
    }
    /* Go out of RESET state */
    opp->gcr = 0;
}
typedef struct MemReg {
    const char             *name;
    MemoryRegionOps const  *ops;
    hwaddr                  start_addr;
    ram_addr_t              size;
} MemReg;
static void fsl_common_init(OpenPICState *opp)
{
    int i;
    int virq = OPENPIC_MAX_SRC;

    opp->vid = VID_REVISION_1_2;
    opp->vir = VIR_GENERIC;
    opp->vector_mask = 0xFFFF;
    opp->tfrr_reset = 0;
    opp->ivpr_reset = IVPR_MASK_MASK;
    opp->idr_reset = 1 << 0;
    opp->max_irq = OPENPIC_MAX_IRQ;

    opp->irq_ipi0 = virq;
    virq += OPENPIC_MAX_IPI;
    opp->irq_tim0 = virq;
    virq += OPENPIC_MAX_TMR;

    assert(virq <= OPENPIC_MAX_IRQ);

    opp->irq_msi = 224;

    msi_nonbroken = true;
    for (i = 0; i < opp->fsl->max_ext; i++) {
        opp->src[i].level = false;
    }

    /* Internal interrupts, including message and MSI */
    for (i = 16; i < OPENPIC_MAX_SRC; i++) {
        opp->src[i].type = IRQ_TYPE_FSLINT;
        opp->src[i].level = true;
    }

    /* timers and IPIs */
    for (i = OPENPIC_MAX_SRC; i < virq; i++) {
        opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
        opp->src[i].level = false;
    }

    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].n_IRQ = opp->irq_tim0 + i;
        opp->timers[i].qemu_timer_active = false;
        opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                 &qemu_timer_cb,
                                                 &opp->timers[i]);
        opp->timers[i].opp = opp;
    }
}
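
/* Map each named register bank in 'list' as an MMIO subregion of the main
 * OpenPIC region, bumping *count for every bank mapped. */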
static void map_list(OpenPICState *opp, const MemReg *list, int *count)
{
    while (list->name) {
        assert(*count < ARRAY_SIZE(opp->sub_io_mem));

        memory_region_init_io(&opp->sub_io_mem[*count], OBJECT(opp), list->ops,
                              opp, list->name, list->size);

        memory_region_add_subregion(&opp->mem, list->start_addr,
                                    &opp->sub_io_mem[*count]);

        (*count)++;
        list++;
    }
}
static const VMStateDescription vmstate_openpic_irq_queue = {
    .name = "openpic_irq_queue",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size),
        VMSTATE_INT32(next, IRQQueue),
        VMSTATE_INT32(priority, IRQQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_openpic_irqdest = {
    .name = "openpic_irqdest",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(ctpr, IRQDest),
        VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_openpic_irqsource = {
    .name = "openpic_irqsource",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ivpr, IRQSource),
        VMSTATE_UINT32(idr, IRQSource),
        VMSTATE_UINT32(destmask, IRQSource),
        VMSTATE_INT32(last_cpu, IRQSource),
        VMSTATE_INT32(pending, IRQSource),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_openpic_timer = {
    .name = "openpic_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tccr, OpenPICTimer),
        VMSTATE_UINT32(tbcr, OpenPICTimer),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_openpic_msi = {
    .name = "openpic_msi",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(msir, OpenPICMSI),
        VMSTATE_END_OF_LIST()
    }
};
static int openpic_post_load(void *opaque, int version_id)
{
    OpenPICState *opp = (OpenPICState *)opaque;
    int i;

    /* Update internal ivpr and idr variables */
    for (i = 0; i < opp->max_irq; i++) {
        write_IRQreg_idr(opp, i, opp->src[i].idr);
        write_IRQreg_ivpr(opp, i, opp->src[i].ivpr);
    }

    return 0;
}
static const VMStateDescription vmstate_openpic = {
    .name = "openpic",
    .version_id = 3,
    .minimum_version_id = 3,
    .post_load = openpic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gcr, OpenPICState),
        VMSTATE_UINT32(vir, OpenPICState),
        VMSTATE_UINT32(pir, OpenPICState),
        VMSTATE_UINT32(spve, OpenPICState),
        VMSTATE_UINT32(tfrr, OpenPICState),
        VMSTATE_UINT32(max_irq, OpenPICState),
        VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0,
                                     vmstate_openpic_irqsource, IRQSource),
        VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0,
                                     vmstate_openpic_irqdest, IRQDest),
        VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0,
                             vmstate_openpic_timer, OpenPICTimer),
        VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0,
                             vmstate_openpic_msi, OpenPICMSI),
        VMSTATE_UINT32(irq_ipi0, OpenPICState),
        VMSTATE_UINT32(irq_tim0, OpenPICState),
        VMSTATE_UINT32(irq_msi, OpenPICState),
        VMSTATE_END_OF_LIST()
    }
};
static void openpic_init(Object *obj)
{
    OpenPICState *opp = OPENPIC(obj);

    memory_region_init(&opp->mem, obj, "openpic", 0x40000);
}
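
/* Realize: pick the register banks and reset defaults for the selected
 * model, map them into the 256 KiB OpenPIC region, and wire up the per-CPU
 * output IRQs and the GPIO inputs for the interrupt sources. */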
static void openpic_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *d = SYS_BUS_DEVICE(dev);
    OpenPICState *opp = OPENPIC(dev);
    int i, j;
    int list_count = 0;
    static const MemReg list_le[] = {
        {"glb", &openpic_glb_ops_le,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_le,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_le,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_le,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    static const MemReg list_be[] = {
        {"glb", &openpic_glb_ops_be,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_be,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_be,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_be,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    static const MemReg list_fsl[] = {
        {"msi", &openpic_msi_ops_be,
         OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
        {"summary", &openpic_summary_ops_be,
         OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
        {NULL}
    };

    if (opp->nb_cpus > MAX_CPU) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
                   TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
                   (uint64_t)0, (uint64_t)MAX_CPU);
        return;
    }

    switch (opp->model) {
    case OPENPIC_MODEL_FSL_MPIC_20:
    default:
        opp->fsl = &fsl_mpic_20;
        opp->brr1 = 0x00400200;
        opp->flags |= OPENPIC_FLAG_IDR_CRIT;
        opp->nb_irqs = 80;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_FSL_MPIC_42:
        opp->fsl = &fsl_mpic_42;
        opp->brr1 = 0x00400402;
        opp->flags |= OPENPIC_FLAG_ILR;
        opp->nb_irqs = 196;
        opp->mpic_mode_mask = GCR_MODE_PROXY;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_RAVEN:
        opp->nb_irqs = RAVEN_MAX_EXT;
        opp->vid = VID_REVISION_1_3;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = RAVEN_MAX_IRQ;
        opp->irq_ipi0 = RAVEN_IPI_IRQ;
        opp->irq_tim0 = RAVEN_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }

        map_list(opp, list_le, &list_count);
        break;

    case OPENPIC_MODEL_KEYLARGO:
        opp->nb_irqs = KEYLARGO_MAX_EXT;
        opp->vid = VID_REVISION_1_2;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = KEYLARGO_MAX_IRQ;
        opp->irq_ipi0 = KEYLARGO_IPI_IRQ;
        opp->irq_tim0 = KEYLARGO_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }

        map_list(opp, list_le, &list_count);
        break;
    }

    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB);
        for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
            sysbus_init_irq(d, &opp->dst[i].irqs[j]);
        }

        opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
    }

    sysbus_init_mmio(d, &opp->mem);
    qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
}
static Property openpic_properties[] = {
    DEFINE_PROP_UINT32("model", OpenPICState, model,
                       OPENPIC_MODEL_FSL_MPIC_20),
    DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void openpic_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = openpic_realize;
    dc->props = openpic_properties;
    dc->reset = openpic_reset;
    dc->vmsd = &vmstate_openpic;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo openpic_info = {
    .name          = TYPE_OPENPIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(OpenPICState),
    .instance_init = openpic_init,
    .class_init    = openpic_class_init,
};

static void openpic_register_types(void)
{
    type_register_static(&openpic_info);
}

type_init(openpic_register_types)