4 * Copyright (c) 2004 Jocelyn Mayer
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 * Based on OpenPic implementations:
28 * - Intel GW80314 I/O companion chip developer's manual
29 * - Motorola MPC8245 & MPC8540 user manuals.
30 * - Motorola MCP750 (aka Raven) programmer manual.
31 * Motorola Harrier programmer manual
33 * Serial interrupts, as implemented in Raven chipset are not supported yet.
36 #include "qemu/osdep.h"
38 #include "hw/ppc/mac.h"
39 #include "hw/pci/pci.h"
40 #include "hw/ppc/openpic.h"
41 #include "hw/ppc/ppc_e500.h"
42 #include "hw/sysbus.h"
43 #include "hw/pci/msi.h"
44 #include "qapi/error.h"
45 #include "qemu/bitops.h"
46 #include "qapi/qmp/qerror.h"
48 #include "qemu/timer.h"
//#define DEBUG_OPENPIC

/* Debug tracing is compiled in but disabled unless DEBUG_OPENPIC is
 * defined; using a const int (rather than #ifdef'ing every DPRINTF call
 * site) keeps the debug code permanently type-checked by the compiler. */
#ifdef DEBUG_OPENPIC
static const int debug_openpic = 1;
#else
static const int debug_openpic = 0;
#endif
static int get_current_cpu(void);
/* Debug printf, prefixed with the index of the CPU the access came from.
 * Expands to a no-op at runtime when debug_openpic is 0, but the format
 * arguments stay visible to the compiler for type checking. */
#define DPRINTF(fmt, ...) do { \
        if (debug_openpic) { \
            printf("Core%d: ", get_current_cpu()); \
            printf(fmt , ## __VA_ARGS__); \
        } \
    } while (0)
68 #define VID 0x03 /* MPIC version ID */
70 /* OpenPIC capability flags */
71 #define OPENPIC_FLAG_IDR_CRIT (1 << 0)
72 #define OPENPIC_FLAG_ILR (2 << 0)
74 /* OpenPIC address map */
75 #define OPENPIC_GLB_REG_START 0x0
76 #define OPENPIC_GLB_REG_SIZE 0x10F0
77 #define OPENPIC_TMR_REG_START 0x10F0
78 #define OPENPIC_TMR_REG_SIZE 0x220
79 #define OPENPIC_MSI_REG_START 0x1600
80 #define OPENPIC_MSI_REG_SIZE 0x200
81 #define OPENPIC_SUMMARY_REG_START 0x3800
82 #define OPENPIC_SUMMARY_REG_SIZE 0x800
83 #define OPENPIC_SRC_REG_START 0x10000
84 #define OPENPIC_SRC_REG_SIZE (OPENPIC_MAX_SRC * 0x20)
85 #define OPENPIC_CPU_REG_START 0x20000
86 #define OPENPIC_CPU_REG_SIZE 0x100 + ((MAX_CPU - 1) * 0x1000)
89 #define RAVEN_MAX_CPU 2
90 #define RAVEN_MAX_EXT 48
91 #define RAVEN_MAX_IRQ 64
92 #define RAVEN_MAX_TMR OPENPIC_MAX_TMR
93 #define RAVEN_MAX_IPI OPENPIC_MAX_IPI
95 /* Interrupt definitions */
96 #define RAVEN_FE_IRQ (RAVEN_MAX_EXT) /* Internal functional IRQ */
97 #define RAVEN_ERR_IRQ (RAVEN_MAX_EXT + 1) /* Error IRQ */
98 #define RAVEN_TMR_IRQ (RAVEN_MAX_EXT + 2) /* First timer IRQ */
99 #define RAVEN_IPI_IRQ (RAVEN_TMR_IRQ + RAVEN_MAX_TMR) /* First IPI IRQ */
100 /* First doorbell IRQ */
101 #define RAVEN_DBL_IRQ (RAVEN_IPI_IRQ + (RAVEN_MAX_CPU * RAVEN_MAX_IPI))
103 typedef struct FslMpicInfo
{
107 static FslMpicInfo fsl_mpic_20
= {
111 static FslMpicInfo fsl_mpic_42
= {
115 #define FRR_NIRQ_SHIFT 16
116 #define FRR_NCPU_SHIFT 8
117 #define FRR_VID_SHIFT 0
119 #define VID_REVISION_1_2 2
120 #define VID_REVISION_1_3 3
122 #define VIR_GENERIC 0x00000000 /* Generic Vendor ID */
124 #define GCR_RESET 0x80000000
125 #define GCR_MODE_PASS 0x00000000
126 #define GCR_MODE_MIXED 0x20000000
127 #define GCR_MODE_PROXY 0x60000000
129 #define TBCR_CI 0x80000000 /* count inhibit */
130 #define TCCR_TOG 0x80000000 /* toggles when decrement to zero */
132 #define IDR_EP_SHIFT 31
133 #define IDR_EP_MASK (1U << IDR_EP_SHIFT)
134 #define IDR_CI0_SHIFT 30
135 #define IDR_CI1_SHIFT 29
136 #define IDR_P1_SHIFT 1
137 #define IDR_P0_SHIFT 0
139 #define ILR_INTTGT_MASK 0x000000ff
140 #define ILR_INTTGT_INT 0x00
141 #define ILR_INTTGT_CINT 0x01 /* critical */
142 #define ILR_INTTGT_MCP 0x02 /* machine check */
144 /* The currently supported INTTGT values happen to be the same as QEMU's
145 * openpic output codes, but don't depend on this. The output codes
146 * could change (unlikely, but...) or support could be added for
147 * more INTTGT values.
149 static const int inttgt_output
[][2] = {
150 { ILR_INTTGT_INT
, OPENPIC_OUTPUT_INT
},
151 { ILR_INTTGT_CINT
, OPENPIC_OUTPUT_CINT
},
152 { ILR_INTTGT_MCP
, OPENPIC_OUTPUT_MCK
},
155 static int inttgt_to_output(int inttgt
)
159 for (i
= 0; i
< ARRAY_SIZE(inttgt_output
); i
++) {
160 if (inttgt_output
[i
][0] == inttgt
) {
161 return inttgt_output
[i
][1];
165 fprintf(stderr
, "%s: unsupported inttgt %d\n", __func__
, inttgt
);
166 return OPENPIC_OUTPUT_INT
;
169 static int output_to_inttgt(int output
)
173 for (i
= 0; i
< ARRAY_SIZE(inttgt_output
); i
++) {
174 if (inttgt_output
[i
][1] == output
) {
175 return inttgt_output
[i
][0];
182 #define MSIIR_OFFSET 0x140
183 #define MSIIR_SRS_SHIFT 29
184 #define MSIIR_SRS_MASK (0x7 << MSIIR_SRS_SHIFT)
185 #define MSIIR_IBS_SHIFT 24
186 #define MSIIR_IBS_MASK (0x1f << MSIIR_IBS_SHIFT)
188 static int get_current_cpu(void)
194 return current_cpu
->cpu_index
;
197 static uint32_t openpic_cpu_read_internal(void *opaque
, hwaddr addr
,
199 static void openpic_cpu_write_internal(void *opaque
, hwaddr addr
,
200 uint32_t val
, int idx
);
201 static void openpic_reset(DeviceState
*d
);
203 typedef enum IRQType
{
205 IRQ_TYPE_FSLINT
, /* FSL internal interrupt -- level only */
206 IRQ_TYPE_FSLSPECIAL
, /* FSL timer/IPI interrupt, edge, no polarity */
209 /* Round up to the nearest 64 IRQs so that the queue length
210 * won't change when moving between 32 and 64 bit hosts.
212 #define IRQQUEUE_SIZE_BITS ((OPENPIC_MAX_IRQ + 63) & ~63)
214 typedef struct IRQQueue
{
215 unsigned long *queue
;
216 int32_t queue_size
; /* Only used for VMSTATE_BITMAP */
221 typedef struct IRQSource
{
222 uint32_t ivpr
; /* IRQ vector/priority register */
223 uint32_t idr
; /* IRQ destination register */
224 uint32_t destmask
; /* bitmap of CPU destinations */
226 int output
; /* IRQ level, e.g. OPENPIC_OUTPUT_INT */
227 int pending
; /* TRUE if IRQ is pending */
229 bool level
:1; /* level-triggered */
230 bool nomask
:1; /* critical interrupts ignore mask on some FSL MPICs */
233 #define IVPR_MASK_SHIFT 31
234 #define IVPR_MASK_MASK (1U << IVPR_MASK_SHIFT)
235 #define IVPR_ACTIVITY_SHIFT 30
236 #define IVPR_ACTIVITY_MASK (1U << IVPR_ACTIVITY_SHIFT)
237 #define IVPR_MODE_SHIFT 29
238 #define IVPR_MODE_MASK (1U << IVPR_MODE_SHIFT)
239 #define IVPR_POLARITY_SHIFT 23
240 #define IVPR_POLARITY_MASK (1U << IVPR_POLARITY_SHIFT)
241 #define IVPR_SENSE_SHIFT 22
242 #define IVPR_SENSE_MASK (1U << IVPR_SENSE_SHIFT)
244 #define IVPR_PRIORITY_MASK (0xFU << 16)
245 #define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
246 #define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask)
248 /* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
249 #define IDR_EP 0x80000000 /* external pin */
250 #define IDR_CI 0x40000000 /* critical interrupt */
252 /* Convert between openpic clock ticks and nanosecs. In the hardware the clock
253 frequency is driven by board inputs to the PIC which the PIC would then
254 divide by 4 or 8. For now hard code to 25MZ.
#define OPENPIC_TIMER_FREQ_MHZ 25
#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ)

/* Convert nanoseconds to openpic timer ticks (25 MHz => 40 ns/tick).
 * Integer division truncates toward zero. */
static inline uint64_t ns_to_ticks(uint64_t ns)
{
    return ns / OPENPIC_TIMER_NS_PER_TICK;
}

/* Convert openpic timer ticks to nanoseconds. */
static inline uint64_t ticks_to_ns(uint64_t ticks)
{
    return ticks * OPENPIC_TIMER_NS_PER_TICK;
}
267 typedef struct OpenPICTimer
{
268 uint32_t tccr
; /* Global timer current count register */
269 uint32_t tbcr
; /* Global timer base count register */
271 bool qemu_timer_active
; /* Is the qemu_timer is running? */
272 struct QEMUTimer
*qemu_timer
;
273 struct OpenPICState
*opp
; /* Device timer is part of. */
274 /* The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last
275 current_count written or read, only defined if qemu_timer_active. */
276 uint64_t origin_time
;
279 typedef struct OpenPICMSI
{
280 uint32_t msir
; /* Shared Message Signaled Interrupt Register */
283 typedef struct IRQDest
{
284 int32_t ctpr
; /* CPU current task priority */
289 /* Count of IRQ sources asserting on non-INT outputs */
290 uint32_t outputs_active
[OPENPIC_OUTPUT_NB
];
293 #define OPENPIC(obj) OBJECT_CHECK(OpenPICState, (obj), TYPE_OPENPIC)
295 typedef struct OpenPICState
{
297 SysBusDevice parent_obj
;
302 /* Behavior control */
308 uint32_t vir
; /* Vendor identification register */
309 uint32_t vector_mask
;
314 uint32_t mpic_mode_mask
;
317 MemoryRegion sub_io_mem
[6];
319 /* Global registers */
320 uint32_t frr
; /* Feature reporting register */
321 uint32_t gcr
; /* Global configuration register */
322 uint32_t pir
; /* Processor initialization register */
323 uint32_t spve
; /* Spurious vector register */
324 uint32_t tfrr
; /* Timer frequency reporting register */
325 /* Source registers */
326 IRQSource src
[OPENPIC_MAX_IRQ
];
327 /* Local registers per output pin */
328 IRQDest dst
[MAX_CPU
];
330 /* Timer registers */
331 OpenPICTimer timers
[OPENPIC_MAX_TMR
];
332 /* Shared MSI registers */
333 OpenPICMSI msi
[MAX_MSI
];
340 static inline void IRQ_setbit(IRQQueue
*q
, int n_IRQ
)
342 set_bit(n_IRQ
, q
->queue
);
345 static inline void IRQ_resetbit(IRQQueue
*q
, int n_IRQ
)
347 clear_bit(n_IRQ
, q
->queue
);
350 static void IRQ_check(OpenPICState
*opp
, IRQQueue
*q
)
357 irq
= find_next_bit(q
->queue
, opp
->max_irq
, irq
+ 1);
358 if (irq
== opp
->max_irq
) {
362 DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
363 irq
, IVPR_PRIORITY(opp
->src
[irq
].ivpr
), priority
);
365 if (IVPR_PRIORITY(opp
->src
[irq
].ivpr
) > priority
) {
367 priority
= IVPR_PRIORITY(opp
->src
[irq
].ivpr
);
372 q
->priority
= priority
;
375 static int IRQ_get_next(OpenPICState
*opp
, IRQQueue
*q
)
383 static void IRQ_local_pipe(OpenPICState
*opp
, int n_CPU
, int n_IRQ
,
384 bool active
, bool was_active
)
390 dst
= &opp
->dst
[n_CPU
];
391 src
= &opp
->src
[n_IRQ
];
393 DPRINTF("%s: IRQ %d active %d was %d\n",
394 __func__
, n_IRQ
, active
, was_active
);
396 if (src
->output
!= OPENPIC_OUTPUT_INT
) {
397 DPRINTF("%s: output %d irq %d active %d was %d count %d\n",
398 __func__
, src
->output
, n_IRQ
, active
, was_active
,
399 dst
->outputs_active
[src
->output
]);
401 /* On Freescale MPIC, critical interrupts ignore priority,
402 * IACK, EOI, etc. Before MPIC v4.1 they also ignore
406 if (!was_active
&& dst
->outputs_active
[src
->output
]++ == 0) {
407 DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d\n",
408 __func__
, src
->output
, n_CPU
, n_IRQ
);
409 qemu_irq_raise(dst
->irqs
[src
->output
]);
412 if (was_active
&& --dst
->outputs_active
[src
->output
] == 0) {
413 DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d\n",
414 __func__
, src
->output
, n_CPU
, n_IRQ
);
415 qemu_irq_lower(dst
->irqs
[src
->output
]);
422 priority
= IVPR_PRIORITY(src
->ivpr
);
424 /* Even if the interrupt doesn't have enough priority,
425 * it is still raised, in case ctpr is lowered later.
428 IRQ_setbit(&dst
->raised
, n_IRQ
);
430 IRQ_resetbit(&dst
->raised
, n_IRQ
);
433 IRQ_check(opp
, &dst
->raised
);
435 if (active
&& priority
<= dst
->ctpr
) {
436 DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
437 __func__
, n_IRQ
, priority
, dst
->ctpr
, n_CPU
);
442 if (IRQ_get_next(opp
, &dst
->servicing
) >= 0 &&
443 priority
<= dst
->servicing
.priority
) {
444 DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
445 __func__
, n_IRQ
, dst
->servicing
.next
, n_CPU
);
447 DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
448 __func__
, n_CPU
, n_IRQ
, dst
->raised
.next
);
449 qemu_irq_raise(opp
->dst
[n_CPU
].irqs
[OPENPIC_OUTPUT_INT
]);
452 IRQ_get_next(opp
, &dst
->servicing
);
453 if (dst
->raised
.priority
> dst
->ctpr
&&
454 dst
->raised
.priority
> dst
->servicing
.priority
) {
455 DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
456 __func__
, n_IRQ
, dst
->raised
.next
, dst
->raised
.priority
,
457 dst
->ctpr
, dst
->servicing
.priority
, n_CPU
);
458 /* IRQ line stays asserted */
460 DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
461 __func__
, n_IRQ
, dst
->ctpr
, dst
->servicing
.priority
, n_CPU
);
462 qemu_irq_lower(opp
->dst
[n_CPU
].irqs
[OPENPIC_OUTPUT_INT
]);
467 /* update pic state because registers for n_IRQ have changed value */
468 static void openpic_update_irq(OpenPICState
*opp
, int n_IRQ
)
471 bool active
, was_active
;
474 src
= &opp
->src
[n_IRQ
];
475 active
= src
->pending
;
477 if ((src
->ivpr
& IVPR_MASK_MASK
) && !src
->nomask
) {
478 /* Interrupt source is disabled */
479 DPRINTF("%s: IRQ %d is disabled\n", __func__
, n_IRQ
);
483 was_active
= !!(src
->ivpr
& IVPR_ACTIVITY_MASK
);
486 * We don't have a similar check for already-active because
487 * ctpr may have changed and we need to withdraw the interrupt.
489 if (!active
&& !was_active
) {
490 DPRINTF("%s: IRQ %d is already inactive\n", __func__
, n_IRQ
);
495 src
->ivpr
|= IVPR_ACTIVITY_MASK
;
497 src
->ivpr
&= ~IVPR_ACTIVITY_MASK
;
500 if (src
->destmask
== 0) {
502 DPRINTF("%s: IRQ %d has no target\n", __func__
, n_IRQ
);
506 if (src
->destmask
== (1 << src
->last_cpu
)) {
507 /* Only one CPU is allowed to receive this IRQ */
508 IRQ_local_pipe(opp
, src
->last_cpu
, n_IRQ
, active
, was_active
);
509 } else if (!(src
->ivpr
& IVPR_MODE_MASK
)) {
510 /* Directed delivery mode */
511 for (i
= 0; i
< opp
->nb_cpus
; i
++) {
512 if (src
->destmask
& (1 << i
)) {
513 IRQ_local_pipe(opp
, i
, n_IRQ
, active
, was_active
);
517 /* Distributed delivery mode */
518 for (i
= src
->last_cpu
+ 1; i
!= src
->last_cpu
; i
++) {
519 if (i
== opp
->nb_cpus
) {
522 if (src
->destmask
& (1 << i
)) {
523 IRQ_local_pipe(opp
, i
, n_IRQ
, active
, was_active
);
531 static void openpic_set_irq(void *opaque
, int n_IRQ
, int level
)
533 OpenPICState
*opp
= opaque
;
536 if (n_IRQ
>= OPENPIC_MAX_IRQ
) {
537 fprintf(stderr
, "%s: IRQ %d out of range\n", __func__
, n_IRQ
);
541 src
= &opp
->src
[n_IRQ
];
542 DPRINTF("openpic: set irq %d = %d ivpr=0x%08x\n",
543 n_IRQ
, level
, src
->ivpr
);
545 /* level-sensitive irq */
546 src
->pending
= level
;
547 openpic_update_irq(opp
, n_IRQ
);
549 /* edge-sensitive irq */
552 openpic_update_irq(opp
, n_IRQ
);
555 if (src
->output
!= OPENPIC_OUTPUT_INT
) {
556 /* Edge-triggered interrupts shouldn't be used
557 * with non-INT delivery, but just in case,
558 * try to make it do something sane rather than
559 * cause an interrupt storm. This is close to
560 * what you'd probably see happen in real hardware.
563 openpic_update_irq(opp
, n_IRQ
);
568 static inline uint32_t read_IRQreg_idr(OpenPICState
*opp
, int n_IRQ
)
570 return opp
->src
[n_IRQ
].idr
;
573 static inline uint32_t read_IRQreg_ilr(OpenPICState
*opp
, int n_IRQ
)
575 if (opp
->flags
& OPENPIC_FLAG_ILR
) {
576 return output_to_inttgt(opp
->src
[n_IRQ
].output
);
582 static inline uint32_t read_IRQreg_ivpr(OpenPICState
*opp
, int n_IRQ
)
584 return opp
->src
[n_IRQ
].ivpr
;
587 static inline void write_IRQreg_idr(OpenPICState
*opp
, int n_IRQ
, uint32_t val
)
589 IRQSource
*src
= &opp
->src
[n_IRQ
];
590 uint32_t normal_mask
= (1UL << opp
->nb_cpus
) - 1;
591 uint32_t crit_mask
= 0;
592 uint32_t mask
= normal_mask
;
593 int crit_shift
= IDR_EP_SHIFT
- opp
->nb_cpus
;
596 if (opp
->flags
& OPENPIC_FLAG_IDR_CRIT
) {
597 crit_mask
= mask
<< crit_shift
;
598 mask
|= crit_mask
| IDR_EP
;
601 src
->idr
= val
& mask
;
602 DPRINTF("Set IDR %d to 0x%08x\n", n_IRQ
, src
->idr
);
604 if (opp
->flags
& OPENPIC_FLAG_IDR_CRIT
) {
605 if (src
->idr
& crit_mask
) {
606 if (src
->idr
& normal_mask
) {
607 DPRINTF("%s: IRQ configured for multiple output types, using "
608 "critical\n", __func__
);
611 src
->output
= OPENPIC_OUTPUT_CINT
;
615 for (i
= 0; i
< opp
->nb_cpus
; i
++) {
616 int n_ci
= IDR_CI0_SHIFT
- i
;
618 if (src
->idr
& (1UL << n_ci
)) {
619 src
->destmask
|= 1UL << i
;
623 src
->output
= OPENPIC_OUTPUT_INT
;
625 src
->destmask
= src
->idr
& normal_mask
;
628 src
->destmask
= src
->idr
;
632 static inline void write_IRQreg_ilr(OpenPICState
*opp
, int n_IRQ
, uint32_t val
)
634 if (opp
->flags
& OPENPIC_FLAG_ILR
) {
635 IRQSource
*src
= &opp
->src
[n_IRQ
];
637 src
->output
= inttgt_to_output(val
& ILR_INTTGT_MASK
);
638 DPRINTF("Set ILR %d to 0x%08x, output %d\n", n_IRQ
, src
->idr
,
641 /* TODO: on MPIC v4.0 only, set nomask for non-INT */
645 static inline void write_IRQreg_ivpr(OpenPICState
*opp
, int n_IRQ
, uint32_t val
)
649 /* NOTE when implementing newer FSL MPIC models: starting with v4.0,
650 * the polarity bit is read-only on internal interrupts.
652 mask
= IVPR_MASK_MASK
| IVPR_PRIORITY_MASK
| IVPR_SENSE_MASK
|
653 IVPR_POLARITY_MASK
| opp
->vector_mask
;
655 /* ACTIVITY bit is read-only */
656 opp
->src
[n_IRQ
].ivpr
=
657 (opp
->src
[n_IRQ
].ivpr
& IVPR_ACTIVITY_MASK
) | (val
& mask
);
659 /* For FSL internal interrupts, The sense bit is reserved and zero,
660 * and the interrupt is always level-triggered. Timers and IPIs
661 * have no sense or polarity bits, and are edge-triggered.
663 switch (opp
->src
[n_IRQ
].type
) {
664 case IRQ_TYPE_NORMAL
:
665 opp
->src
[n_IRQ
].level
= !!(opp
->src
[n_IRQ
].ivpr
& IVPR_SENSE_MASK
);
668 case IRQ_TYPE_FSLINT
:
669 opp
->src
[n_IRQ
].ivpr
&= ~IVPR_SENSE_MASK
;
672 case IRQ_TYPE_FSLSPECIAL
:
673 opp
->src
[n_IRQ
].ivpr
&= ~(IVPR_POLARITY_MASK
| IVPR_SENSE_MASK
);
677 openpic_update_irq(opp
, n_IRQ
);
678 DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ
, val
,
679 opp
->src
[n_IRQ
].ivpr
);
682 static void openpic_gcr_write(OpenPICState
*opp
, uint64_t val
)
684 bool mpic_proxy
= false;
686 if (val
& GCR_RESET
) {
687 openpic_reset(DEVICE(opp
));
691 opp
->gcr
&= ~opp
->mpic_mode_mask
;
692 opp
->gcr
|= val
& opp
->mpic_mode_mask
;
694 /* Set external proxy mode */
695 if ((val
& opp
->mpic_mode_mask
) == GCR_MODE_PROXY
) {
699 ppce500_set_mpic_proxy(mpic_proxy
);
702 static void openpic_gbl_write(void *opaque
, hwaddr addr
, uint64_t val
,
705 OpenPICState
*opp
= opaque
;
709 DPRINTF("%s: addr %#" HWADDR_PRIx
" <= %08" PRIx64
"\n",
710 __func__
, addr
, val
);
715 case 0x00: /* Block Revision Register1 (BRR1) is Readonly */
725 openpic_cpu_write_internal(opp
, addr
, val
, get_current_cpu());
727 case 0x1000: /* FRR */
729 case 0x1020: /* GCR */
730 openpic_gcr_write(opp
, val
);
732 case 0x1080: /* VIR */
734 case 0x1090: /* PIR */
735 for (idx
= 0; idx
< opp
->nb_cpus
; idx
++) {
736 if ((val
& (1 << idx
)) && !(opp
->pir
& (1 << idx
))) {
737 DPRINTF("Raise OpenPIC RESET output for CPU %d\n", idx
);
738 dst
= &opp
->dst
[idx
];
739 qemu_irq_raise(dst
->irqs
[OPENPIC_OUTPUT_RESET
]);
740 } else if (!(val
& (1 << idx
)) && (opp
->pir
& (1 << idx
))) {
741 DPRINTF("Lower OpenPIC RESET output for CPU %d\n", idx
);
742 dst
= &opp
->dst
[idx
];
743 qemu_irq_lower(dst
->irqs
[OPENPIC_OUTPUT_RESET
]);
748 case 0x10A0: /* IPI_IVPR */
754 idx
= (addr
- 0x10A0) >> 4;
755 write_IRQreg_ivpr(opp
, opp
->irq_ipi0
+ idx
, val
);
758 case 0x10E0: /* SPVE */
759 opp
->spve
= val
& opp
->vector_mask
;
766 static uint64_t openpic_gbl_read(void *opaque
, hwaddr addr
, unsigned len
)
768 OpenPICState
*opp
= opaque
;
771 DPRINTF("%s: addr %#" HWADDR_PRIx
"\n", __func__
, addr
);
777 case 0x1000: /* FRR */
780 case 0x1020: /* GCR */
783 case 0x1080: /* VIR */
786 case 0x1090: /* PIR */
789 case 0x00: /* Block Revision Register1 (BRR1) */
800 retval
= openpic_cpu_read_internal(opp
, addr
, get_current_cpu());
802 case 0x10A0: /* IPI_IVPR */
808 idx
= (addr
- 0x10A0) >> 4;
809 retval
= read_IRQreg_ivpr(opp
, opp
->irq_ipi0
+ idx
);
812 case 0x10E0: /* SPVE */
818 DPRINTF("%s: => 0x%08x\n", __func__
, retval
);
823 static void openpic_tmr_set_tmr(OpenPICTimer
*tmr
, uint32_t val
, bool enabled
);
825 static void qemu_timer_cb(void *opaque
)
827 OpenPICTimer
*tmr
= opaque
;
828 OpenPICState
*opp
= tmr
->opp
;
829 uint32_t n_IRQ
= tmr
->n_IRQ
;
830 uint32_t val
= tmr
->tbcr
& ~TBCR_CI
;
831 uint32_t tog
= ((tmr
->tccr
& TCCR_TOG
) ^ TCCR_TOG
); /* invert toggle. */
833 DPRINTF("%s n_IRQ=%d\n", __func__
, n_IRQ
);
834 /* Reload current count from base count and setup timer. */
835 tmr
->tccr
= val
| tog
;
836 openpic_tmr_set_tmr(tmr
, val
, /*enabled=*/true);
837 /* Raise the interrupt. */
838 opp
->src
[n_IRQ
].destmask
= read_IRQreg_idr(opp
, n_IRQ
);
839 openpic_set_irq(opp
, n_IRQ
, 1);
840 openpic_set_irq(opp
, n_IRQ
, 0);
843 /* If enabled is true, arranges for an interrupt to be raised val clocks into
844 the future, if enabled is false cancels the timer. */
845 static void openpic_tmr_set_tmr(OpenPICTimer
*tmr
, uint32_t val
, bool enabled
)
847 uint64_t ns
= ticks_to_ns(val
& ~TCCR_TOG
);
848 /* A count of zero causes a timer to be set to expire immediately. This
849 effectively stops the simulation since the timer is constantly expiring
850 which prevents guest code execution, so we don't honor that
851 configuration. On real hardware, this situation would generate an
852 interrupt on every clock cycle if the interrupt was unmasked. */
853 if ((ns
== 0) || !enabled
) {
854 tmr
->qemu_timer_active
= false;
855 tmr
->tccr
= tmr
->tccr
& TCCR_TOG
;
856 timer_del(tmr
->qemu_timer
); /* set timer to never expire. */
858 tmr
->qemu_timer_active
= true;
859 uint64_t now
= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
);
860 tmr
->origin_time
= now
;
861 timer_mod(tmr
->qemu_timer
, now
+ ns
); /* set timer expiration. */
865 /* Returns the current tccr value, i.e., timer value (in clocks) with
867 static uint64_t openpic_tmr_get_timer(OpenPICTimer
*tmr
)
870 if (!tmr
->qemu_timer_active
) {
873 uint64_t now
= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
);
874 uint64_t used
= now
- tmr
->origin_time
; /* nsecs */
875 uint32_t used_ticks
= (uint32_t)ns_to_ticks(used
);
876 uint32_t count
= (tmr
->tccr
& ~TCCR_TOG
) - used_ticks
;
877 retval
= (uint32_t)((tmr
->tccr
& TCCR_TOG
) | (count
& ~TCCR_TOG
));
882 static void openpic_tmr_write(void *opaque
, hwaddr addr
, uint64_t val
,
885 OpenPICState
*opp
= opaque
;
888 DPRINTF("%s: addr %#" HWADDR_PRIx
" <= %08" PRIx64
"\n",
889 __func__
, (addr
+ 0x10f0), val
);
899 addr
-= 0x10; /* correct for TFRR */
900 idx
= (addr
>> 6) & 0x3;
902 switch (addr
& 0x30) {
903 case 0x00: /* TCCR */
905 case 0x10: /* TBCR */
906 /* Did the enable status change? */
907 if ((opp
->timers
[idx
].tbcr
& TBCR_CI
) != (val
& TBCR_CI
)) {
908 /* Did "Count Inhibit" transition from 1 to 0? */
909 if ((val
& TBCR_CI
) == 0) {
910 opp
->timers
[idx
].tccr
= val
& ~TCCR_TOG
;
912 openpic_tmr_set_tmr(&opp
->timers
[idx
],
914 /*enabled=*/((val
& TBCR_CI
) == 0));
916 opp
->timers
[idx
].tbcr
= val
;
918 case 0x20: /* TVPR */
919 write_IRQreg_ivpr(opp
, opp
->irq_tim0
+ idx
, val
);
922 write_IRQreg_idr(opp
, opp
->irq_tim0
+ idx
, val
);
927 static uint64_t openpic_tmr_read(void *opaque
, hwaddr addr
, unsigned len
)
929 OpenPICState
*opp
= opaque
;
930 uint32_t retval
= -1;
933 DPRINTF("%s: addr %#" HWADDR_PRIx
"\n", __func__
, addr
+ 0x10f0);
942 addr
-= 0x10; /* correct for TFRR */
943 idx
= (addr
>> 6) & 0x3;
944 switch (addr
& 0x30) {
945 case 0x00: /* TCCR */
946 retval
= openpic_tmr_get_timer(&opp
->timers
[idx
]);
948 case 0x10: /* TBCR */
949 retval
= opp
->timers
[idx
].tbcr
;
951 case 0x20: /* TVPR */
952 retval
= read_IRQreg_ivpr(opp
, opp
->irq_tim0
+ idx
);
955 retval
= read_IRQreg_idr(opp
, opp
->irq_tim0
+ idx
);
960 DPRINTF("%s: => 0x%08x\n", __func__
, retval
);
965 static void openpic_src_write(void *opaque
, hwaddr addr
, uint64_t val
,
968 OpenPICState
*opp
= opaque
;
971 DPRINTF("%s: addr %#" HWADDR_PRIx
" <= %08" PRIx64
"\n",
972 __func__
, addr
, val
);
974 addr
= addr
& 0xffff;
977 switch (addr
& 0x1f) {
979 write_IRQreg_ivpr(opp
, idx
, val
);
982 write_IRQreg_idr(opp
, idx
, val
);
985 write_IRQreg_ilr(opp
, idx
, val
);
990 static uint64_t openpic_src_read(void *opaque
, uint64_t addr
, unsigned len
)
992 OpenPICState
*opp
= opaque
;
996 DPRINTF("%s: addr %#" HWADDR_PRIx
"\n", __func__
, addr
);
999 addr
= addr
& 0xffff;
1002 switch (addr
& 0x1f) {
1004 retval
= read_IRQreg_ivpr(opp
, idx
);
1007 retval
= read_IRQreg_idr(opp
, idx
);
1010 retval
= read_IRQreg_ilr(opp
, idx
);
1014 DPRINTF("%s: => 0x%08x\n", __func__
, retval
);
1018 static void openpic_msi_write(void *opaque
, hwaddr addr
, uint64_t val
,
1021 OpenPICState
*opp
= opaque
;
1022 int idx
= opp
->irq_msi
;
1025 DPRINTF("%s: addr %#" HWADDR_PRIx
" <= 0x%08" PRIx64
"\n",
1026 __func__
, addr
, val
);
1033 srs
= val
>> MSIIR_SRS_SHIFT
;
1035 ibs
= (val
& MSIIR_IBS_MASK
) >> MSIIR_IBS_SHIFT
;
1036 opp
->msi
[srs
].msir
|= 1 << ibs
;
1037 openpic_set_irq(opp
, idx
, 1);
1040 /* most registers are read-only, thus ignored */
1045 static uint64_t openpic_msi_read(void *opaque
, hwaddr addr
, unsigned size
)
1047 OpenPICState
*opp
= opaque
;
1051 DPRINTF("%s: addr %#" HWADDR_PRIx
"\n", __func__
, addr
);
1066 case 0x70: /* MSIRs */
1067 r
= opp
->msi
[srs
].msir
;
1069 opp
->msi
[srs
].msir
= 0;
1070 openpic_set_irq(opp
, opp
->irq_msi
+ srs
, 0);
1072 case 0x120: /* MSISR */
1073 for (i
= 0; i
< MAX_MSI
; i
++) {
1074 r
|= (opp
->msi
[i
].msir
? 1 : 0) << i
;
1082 static uint64_t openpic_summary_read(void *opaque
, hwaddr addr
, unsigned size
)
1086 DPRINTF("%s: addr %#" HWADDR_PRIx
"\n", __func__
, addr
);
1088 /* TODO: EISR/EIMR */
1093 static void openpic_summary_write(void *opaque
, hwaddr addr
, uint64_t val
,
1096 DPRINTF("%s: addr %#" HWADDR_PRIx
" <= 0x%08" PRIx64
"\n",
1097 __func__
, addr
, val
);
1099 /* TODO: EISR/EIMR */
1102 static void openpic_cpu_write_internal(void *opaque
, hwaddr addr
,
1103 uint32_t val
, int idx
)
1105 OpenPICState
*opp
= opaque
;
1110 DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx
" <= 0x%08x\n", __func__
, idx
,
1113 if (idx
< 0 || idx
>= opp
->nb_cpus
) {
1120 dst
= &opp
->dst
[idx
];
1123 case 0x40: /* IPIDR */
1127 idx
= (addr
- 0x40) >> 4;
1128 /* we use IDE as mask which CPUs to deliver the IPI to still. */
1129 opp
->src
[opp
->irq_ipi0
+ idx
].destmask
|= val
;
1130 openpic_set_irq(opp
, opp
->irq_ipi0
+ idx
, 1);
1131 openpic_set_irq(opp
, opp
->irq_ipi0
+ idx
, 0);
1133 case 0x80: /* CTPR */
1134 dst
->ctpr
= val
& 0x0000000F;
1136 DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
1137 __func__
, idx
, dst
->ctpr
, dst
->raised
.priority
,
1138 dst
->servicing
.priority
);
1140 if (dst
->raised
.priority
<= dst
->ctpr
) {
1141 DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
1143 qemu_irq_lower(dst
->irqs
[OPENPIC_OUTPUT_INT
]);
1144 } else if (dst
->raised
.priority
> dst
->servicing
.priority
) {
1145 DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d\n",
1146 __func__
, idx
, dst
->raised
.next
);
1147 qemu_irq_raise(dst
->irqs
[OPENPIC_OUTPUT_INT
]);
1151 case 0x90: /* WHOAMI */
1152 /* Read-only register */
1154 case 0xA0: /* IACK */
1155 /* Read-only register */
1157 case 0xB0: /* EOI */
1159 s_IRQ
= IRQ_get_next(opp
, &dst
->servicing
);
1162 DPRINTF("%s: EOI with no interrupt in service\n", __func__
);
1166 IRQ_resetbit(&dst
->servicing
, s_IRQ
);
1167 /* Set up next servicing IRQ */
1168 s_IRQ
= IRQ_get_next(opp
, &dst
->servicing
);
1169 /* Check queued interrupts. */
1170 n_IRQ
= IRQ_get_next(opp
, &dst
->raised
);
1171 src
= &opp
->src
[n_IRQ
];
1174 IVPR_PRIORITY(src
->ivpr
) > dst
->servicing
.priority
)) {
1175 DPRINTF("Raise OpenPIC INT output cpu %d irq %d\n",
1177 qemu_irq_raise(opp
->dst
[idx
].irqs
[OPENPIC_OUTPUT_INT
]);
1185 static void openpic_cpu_write(void *opaque
, hwaddr addr
, uint64_t val
,
1188 openpic_cpu_write_internal(opaque
, addr
, val
, (addr
& 0x1f000) >> 12);
1192 static uint32_t openpic_iack(OpenPICState
*opp
, IRQDest
*dst
, int cpu
)
1197 DPRINTF("Lower OpenPIC INT output\n");
1198 qemu_irq_lower(dst
->irqs
[OPENPIC_OUTPUT_INT
]);
1200 irq
= IRQ_get_next(opp
, &dst
->raised
);
1201 DPRINTF("IACK: irq=%d\n", irq
);
1204 /* No more interrupt pending */
1208 src
= &opp
->src
[irq
];
1209 if (!(src
->ivpr
& IVPR_ACTIVITY_MASK
) ||
1210 !(IVPR_PRIORITY(src
->ivpr
) > dst
->ctpr
)) {
1211 fprintf(stderr
, "%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
1212 __func__
, irq
, dst
->ctpr
, src
->ivpr
);
1213 openpic_update_irq(opp
, irq
);
1216 /* IRQ enter servicing state */
1217 IRQ_setbit(&dst
->servicing
, irq
);
1218 retval
= IVPR_VECTOR(opp
, src
->ivpr
);
1222 /* edge-sensitive IRQ */
1223 src
->ivpr
&= ~IVPR_ACTIVITY_MASK
;
1225 IRQ_resetbit(&dst
->raised
, irq
);
1228 /* Timers and IPIs support multicast. */
1229 if (((irq
>= opp
->irq_ipi0
) && (irq
< (opp
->irq_ipi0
+ OPENPIC_MAX_IPI
))) ||
1230 ((irq
>= opp
->irq_tim0
) && (irq
< (opp
->irq_tim0
+ OPENPIC_MAX_TMR
)))) {
1231 DPRINTF("irq is IPI or TMR\n");
1232 src
->destmask
&= ~(1 << cpu
);
1233 if (src
->destmask
&& !src
->level
) {
1234 /* trigger on CPUs that didn't know about it yet */
1235 openpic_set_irq(opp
, irq
, 1);
1236 openpic_set_irq(opp
, irq
, 0);
1237 /* if all CPUs knew about it, set active bit again */
1238 src
->ivpr
|= IVPR_ACTIVITY_MASK
;
1245 static uint32_t openpic_cpu_read_internal(void *opaque
, hwaddr addr
,
1248 OpenPICState
*opp
= opaque
;
1252 DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx
"\n", __func__
, idx
, addr
);
1253 retval
= 0xFFFFFFFF;
1255 if (idx
< 0 || idx
>= opp
->nb_cpus
) {
1262 dst
= &opp
->dst
[idx
];
1265 case 0x80: /* CTPR */
1268 case 0x90: /* WHOAMI */
1271 case 0xA0: /* IACK */
1272 retval
= openpic_iack(opp
, dst
, idx
);
1274 case 0xB0: /* EOI */
1280 DPRINTF("%s: => 0x%08x\n", __func__
, retval
);
1285 static uint64_t openpic_cpu_read(void *opaque
, hwaddr addr
, unsigned len
)
1287 return openpic_cpu_read_internal(opaque
, addr
, (addr
& 0x1f000) >> 12);
1290 static const MemoryRegionOps openpic_glb_ops_le
= {
1291 .write
= openpic_gbl_write
,
1292 .read
= openpic_gbl_read
,
1293 .endianness
= DEVICE_LITTLE_ENDIAN
,
1295 .min_access_size
= 4,
1296 .max_access_size
= 4,
1300 static const MemoryRegionOps openpic_glb_ops_be
= {
1301 .write
= openpic_gbl_write
,
1302 .read
= openpic_gbl_read
,
1303 .endianness
= DEVICE_BIG_ENDIAN
,
1305 .min_access_size
= 4,
1306 .max_access_size
= 4,
1310 static const MemoryRegionOps openpic_tmr_ops_le
= {
1311 .write
= openpic_tmr_write
,
1312 .read
= openpic_tmr_read
,
1313 .endianness
= DEVICE_LITTLE_ENDIAN
,
1315 .min_access_size
= 4,
1316 .max_access_size
= 4,
1320 static const MemoryRegionOps openpic_tmr_ops_be
= {
1321 .write
= openpic_tmr_write
,
1322 .read
= openpic_tmr_read
,
1323 .endianness
= DEVICE_BIG_ENDIAN
,
1325 .min_access_size
= 4,
1326 .max_access_size
= 4,
1330 static const MemoryRegionOps openpic_cpu_ops_le
= {
1331 .write
= openpic_cpu_write
,
1332 .read
= openpic_cpu_read
,
1333 .endianness
= DEVICE_LITTLE_ENDIAN
,
1335 .min_access_size
= 4,
1336 .max_access_size
= 4,
1340 static const MemoryRegionOps openpic_cpu_ops_be
= {
1341 .write
= openpic_cpu_write
,
1342 .read
= openpic_cpu_read
,
1343 .endianness
= DEVICE_BIG_ENDIAN
,
1345 .min_access_size
= 4,
1346 .max_access_size
= 4,
1350 static const MemoryRegionOps openpic_src_ops_le
= {
1351 .write
= openpic_src_write
,
1352 .read
= openpic_src_read
,
1353 .endianness
= DEVICE_LITTLE_ENDIAN
,
1355 .min_access_size
= 4,
1356 .max_access_size
= 4,
1360 static const MemoryRegionOps openpic_src_ops_be
= {
1361 .write
= openpic_src_write
,
1362 .read
= openpic_src_read
,
1363 .endianness
= DEVICE_BIG_ENDIAN
,
1365 .min_access_size
= 4,
1366 .max_access_size
= 4,
1370 static const MemoryRegionOps openpic_msi_ops_be
= {
1371 .read
= openpic_msi_read
,
1372 .write
= openpic_msi_write
,
1373 .endianness
= DEVICE_BIG_ENDIAN
,
1375 .min_access_size
= 4,
1376 .max_access_size
= 4,
1380 static const MemoryRegionOps openpic_summary_ops_be
= {
1381 .read
= openpic_summary_read
,
1382 .write
= openpic_summary_write
,
1383 .endianness
= DEVICE_BIG_ENDIAN
,
1385 .min_access_size
= 4,
1386 .max_access_size
= 4,
1390 static void openpic_reset(DeviceState
*d
)
1392 OpenPICState
*opp
= OPENPIC(d
);
1395 opp
->gcr
= GCR_RESET
;
1396 /* Initialise controller registers */
1397 opp
->frr
= ((opp
->nb_irqs
- 1) << FRR_NIRQ_SHIFT
) |
1398 ((opp
->nb_cpus
- 1) << FRR_NCPU_SHIFT
) |
1399 (opp
->vid
<< FRR_VID_SHIFT
);
1402 opp
->spve
= -1 & opp
->vector_mask
;
1403 opp
->tfrr
= opp
->tfrr_reset
;
1404 /* Initialise IRQ sources */
1405 for (i
= 0; i
< opp
->max_irq
; i
++) {
1406 opp
->src
[i
].ivpr
= opp
->ivpr_reset
;
1407 switch (opp
->src
[i
].type
) {
1408 case IRQ_TYPE_NORMAL
:
1409 opp
->src
[i
].level
= !!(opp
->ivpr_reset
& IVPR_SENSE_MASK
);
1412 case IRQ_TYPE_FSLINT
:
1413 opp
->src
[i
].ivpr
|= IVPR_POLARITY_MASK
;
1416 case IRQ_TYPE_FSLSPECIAL
:
1420 write_IRQreg_idr(opp
, i
, opp
->idr_reset
);
1422 /* Initialise IRQ destinations */
1423 for (i
= 0; i
< opp
->nb_cpus
; i
++) {
1424 opp
->dst
[i
].ctpr
= 15;
1425 opp
->dst
[i
].raised
.next
= -1;
1426 opp
->dst
[i
].raised
.priority
= 0;
1427 bitmap_clear(opp
->dst
[i
].raised
.queue
, 0, IRQQUEUE_SIZE_BITS
);
1428 opp
->dst
[i
].servicing
.next
= -1;
1429 opp
->dst
[i
].servicing
.priority
= 0;
1430 bitmap_clear(opp
->dst
[i
].servicing
.queue
, 0, IRQQUEUE_SIZE_BITS
);
1432 /* Initialise timers */
1433 for (i
= 0; i
< OPENPIC_MAX_TMR
; i
++) {
1434 opp
->timers
[i
].tccr
= 0;
1435 opp
->timers
[i
].tbcr
= TBCR_CI
;
1436 if (opp
->timers
[i
].qemu_timer_active
) {
1437 timer_del(opp
->timers
[i
].qemu_timer
); /* Inhibit timer */
1438 opp
->timers
[i
].qemu_timer_active
= false;
1441 /* Go out of RESET state */
/*
 * One entry of a NULL-terminated table describing an MMIO register bank:
 * debug name, access ops, offset within the OpenPIC window, and size.
 * Consumed by map_list().
 * NOTE(review): field list partially reconstructed from its uses -- the
 * name/start_addr/size members are inferred from map_list() accesses.
 */
typedef struct MemReg {
    const char             *name;
    MemoryRegionOps const  *ops;
    hwaddr                  start_addr;
    ram_addr_t              size;
} MemReg;
/*
 * Shared initialisation for the Freescale MPIC variants: sets the
 * version/vector defaults, lays out the virtual IRQ numbers used for
 * IPIs and timers above the source range, classifies the interrupt
 * sources, and creates the QEMU timers backing the MPIC global timers.
 */
static void fsl_common_init(OpenPICState *opp)
{
    int i;
    int virq = OPENPIC_MAX_SRC;

    opp->vid = VID_REVISION_1_2;
    opp->vir = VIR_GENERIC;
    opp->vector_mask = 0xFFFF;
    opp->tfrr_reset = 0;
    opp->ivpr_reset = IVPR_MASK_MASK;
    opp->idr_reset = 1 << 0;
    opp->max_irq = OPENPIC_MAX_IRQ;

    /* IPIs first, then timers, in the virtual IRQ space above the sources */
    opp->irq_ipi0 = virq;
    virq += OPENPIC_MAX_IPI;
    opp->irq_tim0 = virq;
    virq += OPENPIC_MAX_TMR;

    assert(virq <= OPENPIC_MAX_IRQ);

    /* Advertise working (non-broken) MSI support to the MSI core */
    msi_nonbroken = true;

    /* External interrupts default to edge-triggered */
    for (i = 0; i < opp->fsl->max_ext; i++) {
        opp->src[i].level = false;
    }

    /* Internal interrupts, including message and MSI */
    for (i = 16; i < OPENPIC_MAX_SRC; i++) {
        opp->src[i].type = IRQ_TYPE_FSLINT;
        opp->src[i].level = true;
    }

    /* timers and IPIs */
    for (i = OPENPIC_MAX_SRC; i < virq; i++) {
        opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
        opp->src[i].level = false;
    }

    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].n_IRQ = opp->irq_tim0 + i;
        opp->timers[i].qemu_timer_active = false;
        /* NOTE(review): callback arguments reconstructed -- confirm they
         * match the file's timer callback and per-timer opaque. */
        opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                 &qemu_timer_cb,
                                                 &opp->timers[i]);
        opp->timers[i].opp = opp;
    }
}
1501 static void map_list(OpenPICState
*opp
, const MemReg
*list
, int *count
)
1503 while (list
->name
) {
1504 assert(*count
< ARRAY_SIZE(opp
->sub_io_mem
));
1506 memory_region_init_io(&opp
->sub_io_mem
[*count
], OBJECT(opp
), list
->ops
,
1507 opp
, list
->name
, list
->size
);
1509 memory_region_add_subregion(&opp
->mem
, list
->start_addr
,
1510 &opp
->sub_io_mem
[*count
]);
/* Migration state for one pending/servicing IRQ queue (IRQQueue). */
static const VMStateDescription vmstate_openpic_irq_queue = {
    .name = "openpic_irq_queue",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size),
        VMSTATE_INT32(next, IRQQueue),
        VMSTATE_INT32(priority, IRQQueue),
        VMSTATE_END_OF_LIST()
    }
};
/* Migration state for one IRQ destination (per-CPU view of the PIC). */
static const VMStateDescription vmstate_openpic_irqdest = {
    .name = "openpic_irqdest",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(ctpr, IRQDest),
        VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue,
                       IRQQueue),
        VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Migration state for one IRQ source. destmask is a cached value
 * re-derived in openpic_post_load() from idr/ivpr after load.
 */
static const VMStateDescription vmstate_openpic_irqsource = {
    .name = "openpic_irqsource",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ivpr, IRQSource),
        VMSTATE_UINT32(idr, IRQSource),
        VMSTATE_UINT32(destmask, IRQSource),
        VMSTATE_INT32(last_cpu, IRQSource),
        VMSTATE_INT32(pending, IRQSource),
        VMSTATE_END_OF_LIST()
    }
};
/* Migration state for one global timer (current count + base count). */
static const VMStateDescription vmstate_openpic_timer = {
    .name = "openpic_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tccr, OpenPICTimer),
        VMSTATE_UINT32(tbcr, OpenPICTimer),
        VMSTATE_END_OF_LIST()
    }
};
/* Migration state for one MSI bank (the MSIR register). */
static const VMStateDescription vmstate_openpic_msi = {
    .name = "openpic_msi",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(msir, OpenPICMSI),
        VMSTATE_END_OF_LIST()
    }
};
1579 static int openpic_post_load(void *opaque
, int version_id
)
1581 OpenPICState
*opp
= (OpenPICState
*)opaque
;
1584 /* Update internal ivpr and idr variables */
1585 for (i
= 0; i
< opp
->max_irq
; i
++) {
1586 write_IRQreg_idr(opp
, i
, opp
->src
[i
].idr
);
1587 write_IRQreg_ivpr(opp
, i
, opp
->src
[i
].ivpr
);
/*
 * Top-level migration state for the controller (version 3).  Per-source,
 * per-destination, timer and MSI state is delegated to the descriptions
 * above; nb_cpus must match between source and destination.
 */
static const VMStateDescription vmstate_openpic = {
    .name = "openpic",
    .version_id = 3,
    .minimum_version_id = 3,
    .post_load = openpic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gcr, OpenPICState),
        VMSTATE_UINT32(vir, OpenPICState),
        VMSTATE_UINT32(pir, OpenPICState),
        VMSTATE_UINT32(spve, OpenPICState),
        VMSTATE_UINT32(tfrr, OpenPICState),
        VMSTATE_UINT32(max_irq, OpenPICState),
        VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0,
                                     vmstate_openpic_irqsource, IRQSource),
        VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0,
                                     vmstate_openpic_irqdest, IRQDest),
        VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0,
                             vmstate_openpic_timer, OpenPICTimer),
        VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0,
                             vmstate_openpic_msi, OpenPICMSI),
        VMSTATE_UINT32(irq_ipi0, OpenPICState),
        VMSTATE_UINT32(irq_tim0, OpenPICState),
        VMSTATE_UINT32(irq_msi, OpenPICState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Instance init: create the 256 KiB (0x40000) container region that
 * openpic_realize() later populates with per-bank subregions.
 */
static void openpic_init(Object *obj)
{
    OpenPICState *opp = OPENPIC(obj);

    memory_region_init(&opp->mem, obj, "openpic", 0x40000);
}
/*
 * Realize: validate the nb_cpus property, apply the model-specific
 * register layout and reset defaults, map the MMIO banks and export
 * the sysbus IRQ outputs, GPIO inputs and MMIO window.
 *
 * NOTE(review): a few per-model initialisation lines (nb_irqs, idr_reset,
 * brr1 for Raven) were reconstructed -- verify against upstream.
 */
static void openpic_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *d = SYS_BUS_DEVICE(dev);
    OpenPICState *opp = OPENPIC(dev);
    int i, j;
    int list_count = 0;

    /* Register banks for the little-endian (Raven) layout */
    static const MemReg list_le[] = {
        {"glb", &openpic_glb_ops_le,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_le,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_le,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_le,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    /* Register banks for the big-endian (FSL MPIC) layout */
    static const MemReg list_be[] = {
        {"glb", &openpic_glb_ops_be,
         OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_be,
         OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_be,
         OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_be,
         OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    /* FSL-only banks: MSI and interrupt-summary registers */
    static const MemReg list_fsl[] = {
        {"msi", &openpic_msi_ops_be,
         OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
        {"summary", &openpic_summary_ops_be,
         OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
        {NULL}
    };

    if (opp->nb_cpus > MAX_CPU) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
                   TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
                   (uint64_t)0, (uint64_t)MAX_CPU);
        return;
    }

    switch (opp->model) {
    case OPENPIC_MODEL_FSL_MPIC_20:
    default:
        opp->fsl = &fsl_mpic_20;
        opp->brr1 = 0x00400200;
        opp->flags |= OPENPIC_FLAG_IDR_CRIT;
        opp->nb_irqs = 80;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_FSL_MPIC_42:
        opp->fsl = &fsl_mpic_42;
        opp->brr1 = 0x00400402;
        opp->flags |= OPENPIC_FLAG_ILR;
        opp->nb_irqs = 196;
        opp->mpic_mode_mask = GCR_MODE_PROXY;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_RAVEN:
        opp->nb_irqs = RAVEN_MAX_EXT;
        opp->vid = VID_REVISION_1_3;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = RAVEN_MAX_IRQ;
        opp->irq_ipi0 = RAVEN_IPI_IRQ;
        opp->irq_tim0 = RAVEN_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        /* Raven is modelled as a uniprocessor interrupt controller */
        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }
        map_list(opp, list_le, &list_count);
        break;
    }

    for (i = 0; i < opp->nb_cpus; i++) {
        /* One qemu_irq per PIC output line, wired out through sysbus */
        opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB);
        for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
            sysbus_init_irq(d, &opp->dst[i].irqs[j]);
        }

        opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
    }

    sysbus_init_mmio(d, &opp->mem);
    /* One GPIO input per IRQ source; openpic_set_irq is the handler */
    qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
}
/* qdev properties: PIC model variant and number of CPUs served. */
static Property openpic_properties[] = {
    DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20),
    DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
    DEFINE_PROP_END_OF_LIST(),
};
/* QOM class init: wire realize/reset/properties/vmstate into DeviceClass. */
static void openpic_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = openpic_realize;
    dc->props = openpic_properties;
    dc->reset = openpic_reset;
    dc->vmsd = &vmstate_openpic;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
/* QOM type registration record for the OpenPIC sysbus device. */
static const TypeInfo openpic_info = {
    .name          = TYPE_OPENPIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(OpenPICState),
    .instance_init = openpic_init,
    .class_init    = openpic_class_init,
};
/* Register the OpenPIC type with QOM at module-init time. */
static void openpic_register_types(void)
{
    type_register_static(&openpic_info);
}

type_init(openpic_register_types)