 * Copyright (c) 2004 Jocelyn Mayer
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * Based on OpenPic implementations:
 * - Motorola MPC8245 & MPC8540 user manuals.
 * - Motorola Harrier programmer manual
#include "qemu/osdep.h"
#include "hw/ppc/mac.h"
#include "hw/pci/pci.h"
#include "hw/ppc/openpic.h"
#include "hw/ppc/ppc_e500.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/pci/msi.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "qapi/qmp/qerror.h"
#include "qemu/module.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
/* #define DEBUG_OPENPIC */

#ifdef DEBUG_OPENPIC
static const int debug_openpic = 1;
#else
static const int debug_openpic = 0;
#endif

static int get_current_cpu(void);
#define DPRINTF(fmt, ...) do { \
        if (debug_openpic) { \
            info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \
        } \
    } while (0)
/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT     (1 << 0)
#define OPENPIC_FLAG_ILR          (2 << 0)

/* OpenPIC address map */
#define OPENPIC_GLB_REG_START        0x0
#define OPENPIC_GLB_REG_SIZE         0x10F0
#define OPENPIC_TMR_REG_START        0x10F0
#define OPENPIC_TMR_REG_SIZE         0x220
#define OPENPIC_MSI_REG_START        0x1600
#define OPENPIC_MSI_REG_SIZE         0x200
#define OPENPIC_SUMMARY_REG_START    0x3800
#define OPENPIC_SUMMARY_REG_SIZE     0x800
#define OPENPIC_SRC_REG_START        0x10000
#define OPENPIC_SRC_REG_SIZE         (OPENPIC_MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START        0x20000
#define OPENPIC_CPU_REG_SIZE         0x100 + ((MAX_CPU - 1) * 0x1000)
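
/*
 * Example layout: the per-CPU register banks are spaced 0x1000 bytes apart
 * starting at 0x20000, so CPU 0's bank covers 0x20000..0x200FF, CPU 1's
 * starts at 0x21000, and so on; openpic_cpu_read()/openpic_cpu_write() below
 * recover the CPU index from the address with (addr & 0x1f000) >> 12.
 */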
static FslMpicInfo fsl_mpic_20 = {

static FslMpicInfo fsl_mpic_42 = {

#define FRR_NIRQ_SHIFT    16
#define FRR_NCPU_SHIFT     8
#define FRR_VID_SHIFT      0

#define VID_REVISION_1_2    2
#define VID_REVISION_1_3    3

#define VIR_GENERIC      0x00000000 /* Generic Vendor ID */
#define VIR_MPIC2A       0x00004614 /* IBM MPIC-2A */

#define GCR_RESET        0x80000000
#define GCR_MODE_PASS    0x00000000
#define GCR_MODE_MIXED   0x20000000
#define GCR_MODE_PROXY   0x60000000

#define TBCR_CI          0x80000000 /* count inhibit */
#define TCCR_TOG         0x80000000 /* toggles when decrement to zero */

#define IDR_EP_SHIFT     31
#define IDR_EP_MASK      (1U << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT    30
#define IDR_CI1_SHIFT    29
#define IDR_P1_SHIFT      1
#define IDR_P0_SHIFT      0

#define ILR_INTTGT_MASK  0x000000ff
#define ILR_INTTGT_INT   0x00
#define ILR_INTTGT_CINT  0x01 /* critical */
#define ILR_INTTGT_MCP   0x02 /* machine check */

/*
 * The currently supported INTTGT values happen to be the same as QEMU's
 * openpic output codes, but don't depend on this. The output codes
 * could change (unlikely, but...) or support could be added for
 * more INTTGT values.
 */
static const int inttgt_output[][2] = {
    { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT },
    { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT },
    { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK },

static int inttgt_to_output(int inttgt)
    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][0] == inttgt) {
            return inttgt_output[i][1];

    error_report("%s: unsupported inttgt %d", __func__, inttgt);
    return OPENPIC_OUTPUT_INT;

static int output_to_inttgt(int output)
    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][1] == output) {
            return inttgt_output[i][0];
#define MSIIR_OFFSET       0x140
#define MSIIR_SRS_SHIFT    29
#define MSIIR_SRS_MASK     (0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT    24
#define MSIIR_IBS_MASK     (0x1f << MSIIR_IBS_SHIFT)
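
/*
 * Worked example (illustrative value): a 32-bit write of 0x35000000 to MSIIR
 * decodes to SRS = 0x35000000 >> MSIIR_SRS_SHIFT = 1 and
 * IBS = (0x35000000 & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT = 21, so
 * openpic_msi_write() below sets bit 21 of opp->msi[1].msir and raises the
 * corresponding MSI interrupt source.
 */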
static int get_current_cpu(void)
    return current_cpu->cpu_index;

static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx);
static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx);
static void openpic_reset(DeviceState *d);

/*
 * Convert between openpic clock ticks and nanosecs. In the hardware the clock
 * frequency is driven by board inputs to the PIC which the PIC would then
 * divide by 4 or 8. For now hard code to 25 MHz.
 */
#define OPENPIC_TIMER_FREQ_MHZ  25
#define OPENPIC_TIMER_NS_PER_TICK  (1000 / OPENPIC_TIMER_FREQ_MHZ)

static inline uint64_t ns_to_ticks(uint64_t ns)
    return ns / OPENPIC_TIMER_NS_PER_TICK;

static inline uint64_t ticks_to_ns(uint64_t ticks)
    return ticks * OPENPIC_TIMER_NS_PER_TICK;
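
/*
 * Worked example: at 25 MHz each tick is 1000 / 25 = 40 ns, so a timer count
 * of 2500000 ticks corresponds to ticks_to_ns(2500000) = 100000000 ns
 * (100 ms); ns_to_ticks() performs the inverse conversion.
 */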
static inline void IRQ_setbit(IRQQueue *q, int n_IRQ)
    set_bit(n_IRQ, q->queue);

static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ)
    clear_bit(n_IRQ, q->queue);

static void IRQ_check(OpenPICState *opp, IRQQueue *q)
    irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
    if (irq == opp->max_irq) {

    DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d",
            irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

    if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
        priority = IVPR_PRIORITY(opp->src[irq].ivpr);

    q->priority = priority;

static int IRQ_get_next(OpenPICState *opp, IRQQueue *q)
static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
                           bool active, bool was_active)
    dst = &opp->dst[n_CPU];
    src = &opp->src[n_IRQ];

    DPRINTF("%s: IRQ %d active %d was %d",
            __func__, n_IRQ, active, was_active);

    if (src->output != OPENPIC_OUTPUT_INT) {
        DPRINTF("%s: output %d irq %d active %d was %d count %d",
                __func__, src->output, n_IRQ, active, was_active,
                dst->outputs_active[src->output]);

         * On Freescale MPIC, critical interrupts ignore priority,
         * IACK, EOI, etc. Before MPIC v4.1 they also ignore

        if (!was_active && dst->outputs_active[src->output]++ == 0) {
            DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d",
                    __func__, src->output, n_CPU, n_IRQ);
            qemu_irq_raise(dst->irqs[src->output]);

        if (was_active && --dst->outputs_active[src->output] == 0) {
            DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d",
                    __func__, src->output, n_CPU, n_IRQ);
            qemu_irq_lower(dst->irqs[src->output]);

    priority = IVPR_PRIORITY(src->ivpr);

    /*
     * Even if the interrupt doesn't have enough priority,
     * it is still raised, in case ctpr is lowered later.
     */
    IRQ_setbit(&dst->raised, n_IRQ);
    IRQ_resetbit(&dst->raised, n_IRQ);

    IRQ_check(opp, &dst->raised);

    if (active && priority <= dst->ctpr) {
        DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d",
                __func__, n_IRQ, priority, dst->ctpr, n_CPU);

    if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
        priority <= dst->servicing.priority) {
        DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d",
                __func__, n_IRQ, dst->servicing.next, n_CPU);

    DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d",
            __func__, n_CPU, n_IRQ, dst->raised.next);
    qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);

    IRQ_get_next(opp, &dst->servicing);
    if (dst->raised.priority > dst->ctpr &&
        dst->raised.priority > dst->servicing.priority) {
        DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d",
                __func__, n_IRQ, dst->raised.next, dst->raised.priority,
                dst->ctpr, dst->servicing.priority, n_CPU);
        /* IRQ line stays asserted */

    DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d",
            __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU);
    qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
    bool active, was_active;

    src = &opp->src[n_IRQ];
    active = src->pending;

    if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
        /* Interrupt source is disabled */
        DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ);

    was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

    /*
     * We don't have a similar check for already-active because
     * ctpr may have changed and we need to withdraw the interrupt.
     */
    if (!active && !was_active) {
        DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ);

    src->ivpr |= IVPR_ACTIVITY_MASK;
    src->ivpr &= ~IVPR_ACTIVITY_MASK;

    if (src->destmask == 0) {
        DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ);

    if (src->destmask == (1 << src->last_cpu)) {
        /* Only one CPU is allowed to receive this IRQ */
        IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
    } else if (!(src->ivpr & IVPR_MODE_MASK)) {
        /* Directed delivery mode */
        for (i = 0; i < opp->nb_cpus; i++) {
            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);

        /* Distributed delivery mode */
        for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
            if (i == opp->nb_cpus) {

            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
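
/*
 * Illustration of the two delivery modes above, assuming destmask == 0x6:
 * with IVPR_MODE_MASK clear (directed delivery) the change is pushed to both
 * CPU 1 and CPU 2 via IRQ_local_pipe(); with IVPR_MODE_MASK set (distributed
 * delivery) the loop walks the CPUs round-robin starting after
 * src->last_cpu and hands the interrupt to the next CPU present in destmask,
 * so consecutive interrupts rotate between CPU 1 and CPU 2.
 */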
static void openpic_set_irq(void *opaque, int n_IRQ, int level)
    OpenPICState *opp = opaque;

    if (n_IRQ >= OPENPIC_MAX_IRQ) {
        error_report("%s: IRQ %d out of range", __func__, n_IRQ);

    src = &opp->src[n_IRQ];
    DPRINTF("openpic: set irq %d = %d ivpr=0x%08x",
            n_IRQ, level, src->ivpr);

    /* level-sensitive irq */
    src->pending = level;
    openpic_update_irq(opp, n_IRQ);

    /* edge-sensitive irq */
    openpic_update_irq(opp, n_IRQ);

    if (src->output != OPENPIC_OUTPUT_INT) {
        /*
         * Edge-triggered interrupts shouldn't be used
         * with non-INT delivery, but just in case,
         * try to make it do something sane rather than
         * cause an interrupt storm. This is close to
         * what you'd probably see happen in real hardware.
         */
        openpic_update_irq(opp, n_IRQ);
static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ)
    return opp->src[n_IRQ].idr;

static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ)
    if (opp->flags & OPENPIC_FLAG_ILR) {
        return output_to_inttgt(opp->src[n_IRQ].output);

static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ)
    return opp->src[n_IRQ].ivpr;
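
/*
 * Worked example for the IDR masks computed below (illustrative: nb_cpus == 2
 * with OPENPIC_FLAG_IDR_CRIT set): normal_mask = 0x3, crit_shift = 29, so
 * crit_mask = 0x60000000 and the writable bits are the two per-CPU normal
 * destination bits, the two critical destination bits and the EP bit; a
 * value with a critical bit set routes the source to OPENPIC_OUTPUT_CINT.
 */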
static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val)
    IRQSource *src = &opp->src[n_IRQ];
    uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
    uint32_t crit_mask = 0;
    uint32_t mask = normal_mask;
    int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        crit_mask = mask << crit_shift;
        mask |= crit_mask | IDR_EP;

    src->idr = val & mask;
    DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr);

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        if (src->idr & crit_mask) {
            if (src->idr & normal_mask) {
                DPRINTF("%s: IRQ configured for multiple output types, using "
                        "critical", __func__);

            src->output = OPENPIC_OUTPUT_CINT;

            for (i = 0; i < opp->nb_cpus; i++) {
                int n_ci = IDR_CI0_SHIFT - i;

                if (src->idr & (1UL << n_ci)) {
                    src->destmask |= 1UL << i;

            src->output = OPENPIC_OUTPUT_INT;
            src->destmask = src->idr & normal_mask;

        src->destmask = src->idr;
static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val)
    if (opp->flags & OPENPIC_FLAG_ILR) {
        IRQSource *src = &opp->src[n_IRQ];

        src->output = inttgt_to_output(val & ILR_INTTGT_MASK);
        DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, src->idr,

        /* TODO: on MPIC v4.0 only, set nomask for non-INT */
static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val)
    /*
     * NOTE when implementing newer FSL MPIC models: starting with v4.0,
     * the polarity bit is read-only on internal interrupts.
     */
    mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
           IVPR_POLARITY_MASK | opp->vector_mask;

    /* ACTIVITY bit is read-only */
    opp->src[n_IRQ].ivpr =
        (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

    /*
     * For FSL internal interrupts, the sense bit is reserved and zero,
     * and the interrupt is always level-triggered. Timers and IPIs
     * have no sense or polarity bits, and are edge-triggered.
     */
    switch (opp->src[n_IRQ].type) {
    case IRQ_TYPE_NORMAL:
        opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);

    case IRQ_TYPE_FSLINT:
        opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;

    case IRQ_TYPE_FSLSPECIAL:
        opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);

    openpic_update_irq(opp, n_IRQ);
    DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val,
            opp->src[n_IRQ].ivpr);
static void openpic_gcr_write(OpenPICState *opp, uint64_t val)
    bool mpic_proxy = false;

    if (val & GCR_RESET) {
        openpic_reset(DEVICE(opp));

    opp->gcr &= ~opp->mpic_mode_mask;
    opp->gcr |= val & opp->mpic_mode_mask;

    /* Set external proxy mode */
    if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY) {

    ppce500_set_mpic_proxy(mpic_proxy);
static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
    OpenPICState *opp = opaque;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);

    case 0x00: /* Block Revision Register1 (BRR1) is Readonly */
        openpic_cpu_write_internal(opp, addr, val, get_current_cpu());
    case 0x1000: /* FRR */
    case 0x1020: /* GCR */
        openpic_gcr_write(opp, val);
    case 0x1080: /* VIR */
    case 0x1090: /* PIR */
        for (idx = 0; idx < opp->nb_cpus; idx++) {
            if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) {
                DPRINTF("Raise OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
            } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) {
                DPRINTF("Lower OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
    case 0x10A0: /* IPI_IVPR */
        idx = (addr - 0x10A0) >> 4;
        write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
    case 0x10E0: /* SPVE */
        opp->spve = val & opp->vector_mask;
static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
    OpenPICState *opp = opaque;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);

    case 0x1000: /* FRR */
    case 0x1020: /* GCR */
    case 0x1080: /* VIR */
    case 0x1090: /* PIR */
    case 0x00: /* Block Revision Register1 (BRR1) */
        retval = openpic_cpu_read_internal(opp, addr, get_current_cpu());
    case 0x10A0: /* IPI_IVPR */
        idx = (addr - 0x10A0) >> 4;
        retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
    case 0x10E0: /* SPVE */

    DPRINTF("%s: => 0x%08x", __func__, retval);
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled);

static void qemu_timer_cb(void *opaque)
    OpenPICTimer *tmr = opaque;
    OpenPICState *opp = tmr->opp;
    uint32_t n_IRQ = tmr->n_IRQ;
    uint32_t val = tmr->tbcr & ~TBCR_CI;
    uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG);  /* invert toggle. */

    DPRINTF("%s n_IRQ=%d", __func__, n_IRQ);
    /* Reload current count from base count and setup timer. */
    tmr->tccr = val | tog;
    openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
    /* Raise the interrupt. */
    opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ);
    openpic_set_irq(opp, n_IRQ, 1);
    openpic_set_irq(opp, n_IRQ, 0);
/*
 * If enabled is true, arranges for an interrupt to be raised val clocks into
 * the future; if enabled is false, cancels the timer.
 */
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled)
    uint64_t ns = ticks_to_ns(val & ~TCCR_TOG);

    /*
     * A count of zero causes a timer to be set to expire immediately. This
     * effectively stops the simulation since the timer is constantly expiring
     * which prevents guest code execution, so we don't honor that
     * configuration. On real hardware, this situation would generate an
     * interrupt on every clock cycle if the interrupt was unmasked.
     */
    if ((ns == 0) || !enabled) {
        tmr->qemu_timer_active = false;
        tmr->tccr = tmr->tccr & TCCR_TOG;
        timer_del(tmr->qemu_timer); /* set timer to never expire. */

        tmr->qemu_timer_active = true;
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        tmr->origin_time = now;
        timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */
 * Returns the current tccr value, i.e., timer value (in clocks) with
static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr)
    if (!tmr->qemu_timer_active) {

        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        uint64_t used = now - tmr->origin_time;  /* nsecs */
        uint32_t used_ticks = (uint32_t)ns_to_ticks(used);
        uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks;
        retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG));
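
/*
 * Typical flow, as implemented below: the guest writes a base count to a
 * timer's TBCR with TBCR_CI (count inhibit) cleared; openpic_tmr_write()
 * loads tccr from that value and arms the QEMU timer for ticks_to_ns(count)
 * nanoseconds (40 ns per tick at 25 MHz).  When it expires, qemu_timer_cb()
 * reloads the count, flips TCCR_TOG, re-arms the timer and pulses the
 * timer's interrupt source.
 */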
static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
    OpenPICState *opp = opaque;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, (addr + 0x10f0), val);

    addr -= 0x10;  /* correct for TFRR */
    idx = (addr >> 6) & 0x3;

    switch (addr & 0x30) {
    case 0x00: /* TCCR */
    case 0x10: /* TBCR */
        /* Did the enable status change? */
        if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) {
            /* Did "Count Inhibit" transition from 1 to 0? */
            if ((val & TBCR_CI) == 0) {
                opp->timers[idx].tccr = val & ~TCCR_TOG;

            openpic_tmr_set_tmr(&opp->timers[idx],
                                /*enabled=*/((val & TBCR_CI) == 0));

        opp->timers[idx].tbcr = val;
    case 0x20: /* TVPR */
        write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);

        write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
    OpenPICState *opp = opaque;
    uint32_t retval = -1;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0);

    addr -= 0x10;  /* correct for TFRR */
    idx = (addr >> 6) & 0x3;
    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        retval = openpic_tmr_get_timer(&opp->timers[idx]);
    case 0x10: /* TBCR */
        retval = opp->timers[idx].tbcr;
    case 0x20: /* TVPR */
        retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);

        retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);

    DPRINTF("%s: => 0x%08x", __func__, retval);
static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
    OpenPICState *opp = opaque;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);

    addr = addr & 0xffff;

    switch (addr & 0x1f) {
        write_IRQreg_ivpr(opp, idx, val);

        write_IRQreg_idr(opp, idx, val);

        write_IRQreg_ilr(opp, idx, val);
static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
    OpenPICState *opp = opaque;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);

    addr = addr & 0xffff;

    switch (addr & 0x1f) {
        retval = read_IRQreg_ivpr(opp, idx);

        retval = read_IRQreg_idr(opp, idx);

        retval = read_IRQreg_ilr(opp, idx);

    DPRINTF("%s: => 0x%08x", __func__, retval);
static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
    OpenPICState *opp = opaque;
    int idx = opp->irq_msi;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);

        srs = val >> MSIIR_SRS_SHIFT;
        ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
        opp->msi[srs].msir |= 1 << ibs;
        openpic_set_irq(opp, idx, 1);

        /* most registers are read-only, thus ignored */
static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size)
    OpenPICState *opp = opaque;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);

    case 0x70: /* MSIRs */
        r = opp->msi[srs].msir;
        opp->msi[srs].msir = 0;
        openpic_set_irq(opp, opp->irq_msi + srs, 0);
    case 0x120: /* MSISR */
        for (i = 0; i < MAX_MSI; i++) {
            r |= (opp->msi[i].msir ? 1 : 0) << i;
static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);

    /* TODO: EISR/EIMR */

static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val,
    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);

    /* TODO: EISR/EIMR */
static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx)
    OpenPICState *opp = opaque;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx,

    if (idx < 0 || idx >= opp->nb_cpus) {

    dst = &opp->dst[idx];

    case 0x40: /* IPIDR */
        idx = (addr - 0x40) >> 4;
        /* we still use the IDE as a mask of which CPUs to deliver the IPI to. */
        opp->src[opp->irq_ipi0 + idx].destmask |= val;
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
    case 0x80: /* CTPR */
        dst->ctpr = val & 0x0000000F;

        DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d",
                __func__, idx, dst->ctpr, dst->raised.priority,
                dst->servicing.priority);

        if (dst->raised.priority <= dst->ctpr) {
            DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr",
            qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
        } else if (dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d",
                    __func__, idx, dst->raised.next);
            qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]);
    case 0x90: /* WHOAMI */
        /* Read-only register */
    case 0xA0: /* IACK */
        /* Read-only register */
    case 0xB0: /* EOI */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);

            DPRINTF("%s: EOI with no interrupt in service", __func__);

        IRQ_resetbit(&dst->servicing, s_IRQ);
        /* Set up next servicing IRQ */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);
        /* Check queued interrupts. */
        n_IRQ = IRQ_get_next(opp, &dst->raised);
        src = &opp->src[n_IRQ];
            IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
            DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
            qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val,
    openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12);
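
/*
 * Interrupt acknowledge cycle, as modelled below (sketch): a guest read of
 * IACK makes openpic_iack() pick the highest-priority raised IRQ, move it to
 * the per-CPU servicing queue, return IVPR_VECTOR() for it and, for
 * edge-sensitive sources, clear its raised/activity state.  A later write to
 * EOI in openpic_cpu_write_internal() pops the servicing entry and re-raises
 * the INT output if another queued IRQ still qualifies.
 */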
static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
    DPRINTF("Lower OpenPIC INT output");
    qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);

    irq = IRQ_get_next(opp, &dst->raised);
    DPRINTF("IACK: irq=%d", irq);

        /* No more interrupt pending */

    src = &opp->src[irq];
    if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
        !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
        error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x",
                     __func__, irq, dst->ctpr, src->ivpr);
        openpic_update_irq(opp, irq);

    /* IRQ enter servicing state */
    IRQ_setbit(&dst->servicing, irq);
    retval = IVPR_VECTOR(opp, src->ivpr);

        /* edge-sensitive IRQ */
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
        IRQ_resetbit(&dst->raised, irq);

    /* Timers and IPIs support multicast. */
    if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
        ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
        DPRINTF("irq is IPI or TMR");
        src->destmask &= ~(1 << cpu);
        if (src->destmask && !src->level) {
            /* trigger on CPUs that didn't know about it yet */
            openpic_set_irq(opp, irq, 1);
            openpic_set_irq(opp, irq, 0);
            /* if all CPUs knew about it, set active bit again */
            src->ivpr |= IVPR_ACTIVITY_MASK;
static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx)
    OpenPICState *opp = opaque;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr);
    retval = 0xFFFFFFFF;

    if (idx < 0 || idx >= opp->nb_cpus) {

    dst = &opp->dst[idx];

    case 0x80: /* CTPR */
    case 0x90: /* WHOAMI */
    case 0xA0: /* IACK */
        retval = openpic_iack(opp, dst, idx);
    case 0xB0: /* EOI */

    DPRINTF("%s: => 0x%08x", __func__, retval);

static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len)
    return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12);
static const MemoryRegionOps openpic_glb_ops_le = {
    .write = openpic_gbl_write,
    .read = openpic_gbl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,

static const MemoryRegionOps openpic_glb_ops_be = {
    .write = openpic_gbl_write,
    .read = openpic_gbl_read,
    .endianness = DEVICE_BIG_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,

static const MemoryRegionOps openpic_tmr_ops_le = {
    .write = openpic_tmr_write,
    .read = openpic_tmr_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,

static const MemoryRegionOps openpic_tmr_ops_be = {
    .write = openpic_tmr_write,
    .read = openpic_tmr_read,
    .endianness = DEVICE_BIG_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,

static const MemoryRegionOps openpic_cpu_ops_le = {
    .write = openpic_cpu_write,
    .read = openpic_cpu_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,

static const MemoryRegionOps openpic_cpu_ops_be = {
    .write = openpic_cpu_write,
    .read = openpic_cpu_read,
    .endianness = DEVICE_BIG_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,

static const MemoryRegionOps openpic_src_ops_le = {
    .write = openpic_src_write,
    .read = openpic_src_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,

static const MemoryRegionOps openpic_src_ops_be = {
    .write = openpic_src_write,
    .read = openpic_src_read,
    .endianness = DEVICE_BIG_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,

static const MemoryRegionOps openpic_msi_ops_be = {
    .read = openpic_msi_read,
    .write = openpic_msi_write,
    .endianness = DEVICE_BIG_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,

static const MemoryRegionOps openpic_summary_ops_be = {
    .read = openpic_summary_read,
    .write = openpic_summary_write,
    .endianness = DEVICE_BIG_ENDIAN,
        .min_access_size = 4,
        .max_access_size = 4,
static void openpic_reset(DeviceState *d)
    OpenPICState *opp = OPENPIC(d);

    opp->gcr = GCR_RESET;
    /* Initialise controller registers */
    opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
               ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) |
               (opp->vid << FRR_VID_SHIFT);

    opp->spve = -1 & opp->vector_mask;
    opp->tfrr = opp->tfrr_reset;
    /* Initialise IRQ sources */
    for (i = 0; i < opp->max_irq; i++) {
        opp->src[i].ivpr = opp->ivpr_reset;
        switch (opp->src[i].type) {
        case IRQ_TYPE_NORMAL:
            opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK);

        case IRQ_TYPE_FSLINT:
            opp->src[i].ivpr |= IVPR_POLARITY_MASK;

        case IRQ_TYPE_FSLSPECIAL:

        /* Mask all IPI interrupts for Freescale OpenPIC */
        if ((opp->model == OPENPIC_MODEL_FSL_MPIC_20) ||
            (opp->model == OPENPIC_MODEL_FSL_MPIC_42)) {
            if (i >= opp->irq_ipi0 && i < opp->irq_tim0) {
                write_IRQreg_idr(opp, i, 0);

        write_IRQreg_idr(opp, i, opp->idr_reset);

    /* Initialise IRQ destinations */
    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].ctpr = 15;
        opp->dst[i].raised.next = -1;
        opp->dst[i].raised.priority = 0;
        bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.next = -1;
        opp->dst[i].servicing.priority = 0;
        bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS);

    /* Initialise timers */
    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].tccr = 0;
        opp->timers[i].tbcr = TBCR_CI;
        if (opp->timers[i].qemu_timer_active) {
            timer_del(opp->timers[i].qemu_timer); /* Inhibit timer */
            opp->timers[i].qemu_timer_active = false;

    /* Go out of RESET state */
typedef struct MemReg {
    MemoryRegionOps const *ops;

static void fsl_common_init(OpenPICState *opp)
    int virq = OPENPIC_MAX_SRC;

    opp->vid = VID_REVISION_1_2;
    opp->vir = VIR_GENERIC;
    opp->vector_mask = 0xFFFF;
    opp->tfrr_reset = 0;
    opp->ivpr_reset = IVPR_MASK_MASK;
    opp->idr_reset = 1 << 0;
    opp->max_irq = OPENPIC_MAX_IRQ;

    opp->irq_ipi0 = virq;
    virq += OPENPIC_MAX_IPI;
    opp->irq_tim0 = virq;
    virq += OPENPIC_MAX_TMR;

    assert(virq <= OPENPIC_MAX_IRQ);

    msi_nonbroken = true;
    for (i = 0; i < opp->fsl->max_ext; i++) {
        opp->src[i].level = false;

    /* Internal interrupts, including message and MSI */
    for (i = 16; i < OPENPIC_MAX_SRC; i++) {
        opp->src[i].type = IRQ_TYPE_FSLINT;
        opp->src[i].level = true;

    /* timers and IPIs */
    for (i = OPENPIC_MAX_SRC; i < virq; i++) {
        opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
        opp->src[i].level = false;

    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].n_IRQ = opp->irq_tim0 + i;
        opp->timers[i].qemu_timer_active = false;
        opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
        opp->timers[i].opp = opp;
static void map_list(OpenPICState *opp, const MemReg *list, int *count)
    while (list->name) {
        assert(*count < ARRAY_SIZE(opp->sub_io_mem));

        memory_region_init_io(&opp->sub_io_mem[*count], OBJECT(opp), list->ops,
                              opp, list->name, list->size);

        memory_region_add_subregion(&opp->mem, list->start_addr,
                                    &opp->sub_io_mem[*count]);
static const VMStateDescription vmstate_openpic_irq_queue = {
    .name = "openpic_irq_queue",
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size),
        VMSTATE_INT32(next, IRQQueue),
        VMSTATE_INT32(priority, IRQQueue),
        VMSTATE_END_OF_LIST()

static const VMStateDescription vmstate_openpic_irqdest = {
    .name = "openpic_irqdest",
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(ctpr, IRQDest),
        VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue,
        VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue,
        VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB),
        VMSTATE_END_OF_LIST()

static const VMStateDescription vmstate_openpic_irqsource = {
    .name = "openpic_irqsource",
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ivpr, IRQSource),
        VMSTATE_UINT32(idr, IRQSource),
        VMSTATE_UINT32(destmask, IRQSource),
        VMSTATE_INT32(last_cpu, IRQSource),
        VMSTATE_INT32(pending, IRQSource),
        VMSTATE_END_OF_LIST()

static const VMStateDescription vmstate_openpic_timer = {
    .name = "openpic_timer",
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tccr, OpenPICTimer),
        VMSTATE_UINT32(tbcr, OpenPICTimer),
        VMSTATE_END_OF_LIST()

static const VMStateDescription vmstate_openpic_msi = {
    .name = "openpic_msi",
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(msir, OpenPICMSI),
        VMSTATE_END_OF_LIST()

static int openpic_post_load(void *opaque, int version_id)
    OpenPICState *opp = (OpenPICState *)opaque;

    /* Update internal ivpr and idr variables */
    for (i = 0; i < opp->max_irq; i++) {
        write_IRQreg_idr(opp, i, opp->src[i].idr);
        write_IRQreg_ivpr(opp, i, opp->src[i].ivpr);

static const VMStateDescription vmstate_openpic = {
    .minimum_version_id = 3,
    .post_load = openpic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gcr, OpenPICState),
        VMSTATE_UINT32(vir, OpenPICState),
        VMSTATE_UINT32(pir, OpenPICState),
        VMSTATE_UINT32(spve, OpenPICState),
        VMSTATE_UINT32(tfrr, OpenPICState),
        VMSTATE_UINT32(max_irq, OpenPICState),
        VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0,
                                     vmstate_openpic_irqsource, IRQSource),
        VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0,
                                     vmstate_openpic_irqdest, IRQDest),
        VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0,
                             vmstate_openpic_timer, OpenPICTimer),
        VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0,
                             vmstate_openpic_msi, OpenPICMSI),
        VMSTATE_UINT32(irq_ipi0, OpenPICState),
        VMSTATE_UINT32(irq_tim0, OpenPICState),
        VMSTATE_UINT32(irq_msi, OpenPICState),
        VMSTATE_END_OF_LIST()
static void openpic_init(Object *obj)
    OpenPICState *opp = OPENPIC(obj);

    memory_region_init(&opp->mem, obj, "openpic", 0x40000);
static void openpic_realize(DeviceState *dev, Error **errp)
    SysBusDevice *d = SYS_BUS_DEVICE(dev);
    OpenPICState *opp = OPENPIC(dev);

    static const MemReg list_le[] = {
        {"glb", &openpic_glb_ops_le,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_le,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_le,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_le,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},

    static const MemReg list_be[] = {
        {"glb", &openpic_glb_ops_be,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_be,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_be,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_be,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},

    static const MemReg list_fsl[] = {
        {"msi", &openpic_msi_ops_be,
         OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
        {"summary", &openpic_summary_ops_be,
         OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},

    if (opp->nb_cpus > MAX_CPU) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
                   TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
                   (uint64_t)0, (uint64_t)MAX_CPU);

    switch (opp->model) {
    case OPENPIC_MODEL_FSL_MPIC_20:
        opp->fsl = &fsl_mpic_20;
        opp->brr1 = 0x00400200;
        opp->flags |= OPENPIC_FLAG_IDR_CRIT;

        opp->mpic_mode_mask = GCR_MODE_MIXED;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

    case OPENPIC_MODEL_FSL_MPIC_42:
        opp->fsl = &fsl_mpic_42;
        opp->brr1 = 0x00400402;
        opp->flags |= OPENPIC_FLAG_ILR;

        opp->mpic_mode_mask = GCR_MODE_PROXY;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

    case OPENPIC_MODEL_KEYLARGO:
        opp->nb_irqs = KEYLARGO_MAX_EXT;
        opp->vid = VID_REVISION_1_2;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;

        opp->max_irq = KEYLARGO_MAX_IRQ;
        opp->irq_ipi0 = KEYLARGO_IPI_IRQ;
        opp->irq_tim0 = KEYLARGO_TMR_IRQ;

        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");

        map_list(opp, list_le, &list_count);

    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB);
        for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
            sysbus_init_irq(d, &opp->dst[i].irqs[j]);

        opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS);

    sysbus_init_mmio(d, &opp->mem);
    qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
static Property openpic_properties[] = {
    DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20),
    DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
    DEFINE_PROP_END_OF_LIST(),

static void openpic_class_init(ObjectClass *oc, void *data)
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = openpic_realize;
    device_class_set_props(dc, openpic_properties);
    dc->reset = openpic_reset;
    dc->vmsd = &vmstate_openpic;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

static const TypeInfo openpic_info = {
    .name          = TYPE_OPENPIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(OpenPICState),
    .instance_init = openpic_init,
    .class_init    = openpic_class_init,

static void openpic_register_types(void)
    type_register_static(&openpic_info);

type_init(openpic_register_types)