/*
 * Copyright (c) 2004 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Based on OpenPic implementations:
 * - Intel GW80314 I/O companion chip developer's manual
 * - Motorola MPC8245 & MPC8540 user manuals.
 * - Motorola MCP750 (aka Raven) programmer manual.
 * - Motorola Harrier programmer manual
 *
 * Serial interrupts, as implemented in the Raven chipset, are not supported yet.
 */
#include "qemu/osdep.h"
#include "hw/ppc/mac.h"
#include "hw/pci/pci.h"
#include "hw/ppc/openpic.h"
#include "hw/ppc/ppc_e500.h"
#include "hw/sysbus.h"
#include "hw/pci/msi.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "qapi/qmp/qerror.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
//#define DEBUG_OPENPIC

#ifdef DEBUG_OPENPIC
static const int debug_openpic = 1;
#else
static const int debug_openpic = 0;
#endif

static int get_current_cpu(void);
#define DPRINTF(fmt, ...) do { \
        if (debug_openpic) { \
            info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \
        } \
    } while (0)
/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT     (1 << 0)
#define OPENPIC_FLAG_ILR          (2 << 0)

/* OpenPIC address map */
#define OPENPIC_GLB_REG_START        0x0
#define OPENPIC_GLB_REG_SIZE         0x10F0
#define OPENPIC_TMR_REG_START        0x10F0
#define OPENPIC_TMR_REG_SIZE         0x220
#define OPENPIC_MSI_REG_START        0x1600
#define OPENPIC_MSI_REG_SIZE         0x200
#define OPENPIC_SUMMARY_REG_START    0x3800
#define OPENPIC_SUMMARY_REG_SIZE     0x800
#define OPENPIC_SRC_REG_START        0x10000
#define OPENPIC_SRC_REG_SIZE         (OPENPIC_MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START        0x20000
#define OPENPIC_CPU_REG_SIZE         0x100 + ((MAX_CPU - 1) * 0x1000)
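
/* Each CPU's private register window is 0x1000 bytes wide;
 * openpic_cpu_read()/openpic_cpu_write() recover the CPU index from
 * bits 16:12 of the offset, which is why the per-CPU region size above
 * grows by 0x1000 for every additional CPU. */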
static FslMpicInfo fsl_mpic_20 = {
    .max_ext = 12,
};

static FslMpicInfo fsl_mpic_42 = {
    .max_ext = 12,
};
#define FRR_NIRQ_SHIFT    16
#define FRR_NCPU_SHIFT    8
#define FRR_VID_SHIFT     0

#define VID_REVISION_1_2   2
#define VID_REVISION_1_3   3

#define VIR_GENERIC      0x00000000 /* Generic Vendor ID */
#define VIR_MPIC2A       0x00004614 /* IBM MPIC-2A */

#define GCR_RESET        0x80000000
#define GCR_MODE_PASS    0x00000000
#define GCR_MODE_MIXED   0x20000000
#define GCR_MODE_PROXY   0x60000000

#define TBCR_CI          0x80000000 /* count inhibit */
#define TCCR_TOG         0x80000000 /* toggles when decrement to zero */

#define IDR_EP_SHIFT      31
#define IDR_EP_MASK       (1U << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT     30
#define IDR_CI1_SHIFT     29
#define IDR_P1_SHIFT      1
#define IDR_P0_SHIFT      0

#define ILR_INTTGT_MASK   0x000000ff
#define ILR_INTTGT_INT    0x00
#define ILR_INTTGT_CINT   0x01 /* critical */
#define ILR_INTTGT_MCP    0x02 /* machine check */
/* The currently supported INTTGT values happen to be the same as QEMU's
 * openpic output codes, but don't depend on this.  The output codes
 * could change (unlikely, but...) or support could be added for
 * more INTTGT values.
 */
static const int inttgt_output[][2] = {
    { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT },
    { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT },
    { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK },
};
static int inttgt_to_output(int inttgt)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][0] == inttgt) {
            return inttgt_output[i][1];
        }
    }

    error_report("%s: unsupported inttgt %d", __func__, inttgt);
    return OPENPIC_OUTPUT_INT;
}

static int output_to_inttgt(int output)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) {
        if (inttgt_output[i][1] == output) {
            return inttgt_output[i][0];
        }
    }

    abort();
}
#define MSIIR_OFFSET       0x140
#define MSIIR_SRS_SHIFT    29
#define MSIIR_SRS_MASK     (0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT    24
#define MSIIR_IBS_MASK     (0x1f << MSIIR_IBS_SHIFT)
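
/* MSIIR layout: SRS (bits 31:29) selects one of the eight shared MSI
 * registers and IBS (bits 28:24) the interrupt bit to set within it;
 * see openpic_msi_write() below. */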
static int get_current_cpu(void)
{
    if (!current_cpu) {
        return -1;
    }

    return current_cpu->cpu_index;
}

static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx);
static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx);
static void openpic_reset(DeviceState *d);
/* Convert between openpic clock ticks and nanosecs.  In the hardware the clock
   frequency is driven by board inputs to the PIC which the PIC would then
   divide by 4 or 8.  For now hard code to 25MHz.
*/
#define OPENPIC_TIMER_FREQ_MHZ  25
#define OPENPIC_TIMER_NS_PER_TICK  (1000 / OPENPIC_TIMER_FREQ_MHZ)
static inline uint64_t ns_to_ticks(uint64_t ns)
{
    return ns    / OPENPIC_TIMER_NS_PER_TICK;
}
static inline uint64_t ticks_to_ns(uint64_t ticks)
{
    return ticks * OPENPIC_TIMER_NS_PER_TICK;
}
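
/* With the clock hard-coded to 25 MHz, OPENPIC_TIMER_NS_PER_TICK works out
 * to 1000 / 25 = 40, i.e. each timer tick covers 40 ns of virtual time. */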
static inline void IRQ_setbit(IRQQueue *q, int n_IRQ)
{
    set_bit(n_IRQ, q->queue);
}

static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ)
{
    clear_bit(n_IRQ, q->queue);
}

static void IRQ_check(OpenPICState *opp, IRQQueue *q)
{
    int irq = -1;
    int next = -1;
    int priority = -1;

    for (;;) {
        irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
        if (irq == opp->max_irq) {
            break;
        }

        DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d",
                irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

        if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
            next = irq;
            priority = IVPR_PRIORITY(opp->src[irq].ivpr);
        }
    }

    q->next = next;
    q->priority = priority;
}

static int IRQ_get_next(OpenPICState *opp, IRQQueue *q)
{
    IRQ_check(opp, q);

    return q->next;
}
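
/* Each interrupt destination (CPU) keeps two IRQQueues: "raised" tracks the
 * sources currently asserted towards that CPU and "servicing" tracks sources
 * that have been acknowledged (IACK) but not yet EOIed.  IRQ_check()
 * recomputes the highest-priority member of a queue into its next/priority
 * fields, and IRQ_get_next() returns that member. */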
static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
                           bool active, bool was_active)
{
    IRQDest *dst;
    IRQSource *src;
    int priority;

    dst = &opp->dst[n_CPU];
    src = &opp->src[n_IRQ];

    DPRINTF("%s: IRQ %d active %d was %d",
            __func__, n_IRQ, active, was_active);

    if (src->output != OPENPIC_OUTPUT_INT) {
        DPRINTF("%s: output %d irq %d active %d was %d count %d",
                __func__, src->output, n_IRQ, active, was_active,
                dst->outputs_active[src->output]);

        /* On Freescale MPIC, critical interrupts ignore priority,
         * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
         * masking.
         */
        if (active) {
            if (!was_active && dst->outputs_active[src->output]++ == 0) {
                DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_raise(dst->irqs[src->output]);
            }
        } else {
            if (was_active && --dst->outputs_active[src->output] == 0) {
                DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d",
                        __func__, src->output, n_CPU, n_IRQ);
                qemu_irq_lower(dst->irqs[src->output]);
            }
        }

        return;
    }

    priority = IVPR_PRIORITY(src->ivpr);

    /* Even if the interrupt doesn't have enough priority,
     * it is still raised, in case ctpr is lowered later.
     */
    if (active) {
        IRQ_setbit(&dst->raised, n_IRQ);
    } else {
        IRQ_resetbit(&dst->raised, n_IRQ);
    }

    IRQ_check(opp, &dst->raised);

    if (active && priority <= dst->ctpr) {
        DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d",
                __func__, n_IRQ, priority, dst->ctpr, n_CPU);
        active = 0;
    }

    if (active) {
        if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
                priority <= dst->servicing.priority) {
            DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d",
                    __func__, n_IRQ, dst->servicing.next, n_CPU);
        } else {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d",
                    __func__, n_CPU, n_IRQ, dst->raised.next);
            qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    } else {
        IRQ_get_next(opp, &dst->servicing);
        if (dst->raised.priority > dst->ctpr &&
                dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d",
                    __func__, n_IRQ, dst->raised.next, dst->raised.priority,
                    dst->ctpr, dst->servicing.priority, n_CPU);
            /* IRQ line stays asserted */
        } else {
            DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d",
                    __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU);
            qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
        }
    }
}
/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
{
    IRQSource *src;
    bool active, was_active;
    int i;

    src = &opp->src[n_IRQ];
    active = src->pending;

    if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
        /* Interrupt source is disabled */
        DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ);
        active = false;
    }

    was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

    /*
     * We don't have a similar check for already-active because
     * ctpr may have changed and we need to withdraw the interrupt.
     */
    if (!active && !was_active) {
        DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ);
        return;
    }

    if (active) {
        src->ivpr |= IVPR_ACTIVITY_MASK;
    } else {
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
    }

    if (src->destmask == 0) {
        /* No target */
        DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ);
        return;
    }

    if (src->destmask == (1 << src->last_cpu)) {
        /* Only one CPU is allowed to receive this IRQ */
        IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
    } else if (!(src->ivpr & IVPR_MODE_MASK)) {
        /* Directed delivery mode */
        for (i = 0; i < opp->nb_cpus; i++) {
            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
            }
        }
    } else {
        /* Distributed delivery mode */
        for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
            if (i == opp->nb_cpus) {
                i = 0;
            }

            if (src->destmask & (1 << i)) {
                IRQ_local_pipe(opp, i, n_IRQ, active, was_active);
                src->last_cpu = i;
                break;
            }
        }
    }
}
static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
    OpenPICState *opp = opaque;
    IRQSource *src;

    if (n_IRQ >= OPENPIC_MAX_IRQ) {
        error_report("%s: IRQ %d out of range", __func__, n_IRQ);
        abort();
    }

    src = &opp->src[n_IRQ];
    DPRINTF("openpic: set irq %d = %d ivpr=0x%08x",
            n_IRQ, level, src->ivpr);
    if (src->level) {
        /* level-sensitive irq */
        src->pending = level;
        openpic_update_irq(opp, n_IRQ);
    } else {
        /* edge-sensitive irq */
        if (level) {
            src->pending = 1;
            openpic_update_irq(opp, n_IRQ);
        }

        if (src->output != OPENPIC_OUTPUT_INT) {
            /* Edge-triggered interrupts shouldn't be used
             * with non-INT delivery, but just in case,
             * try to make it do something sane rather than
             * cause an interrupt storm.  This is close to
             * what you'd probably see happen in real hardware.
             */
            src->pending = 0;
            openpic_update_irq(opp, n_IRQ);
        }
    }
}
static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].idr;
}

static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ)
{
    if (opp->flags & OPENPIC_FLAG_ILR) {
        return output_to_inttgt(opp->src[n_IRQ].output);
    }

    return 0xffffffff;
}

static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ)
{
    return opp->src[n_IRQ].ivpr;
}
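
/* In the IDR, the low nb_cpus bits select the CPUs that receive the source
 * on the normal INT output.  When OPENPIC_FLAG_IDR_CRIT is set, an equally
 * wide field directly below the EP bit (bit 31) selects critical-interrupt
 * destinations instead; write_IRQreg_idr() below derives src->output and
 * src->destmask from these fields. */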
static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    IRQSource *src = &opp->src[n_IRQ];
    uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
    uint32_t crit_mask = 0;
    uint32_t mask = normal_mask;
    int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
    int i;

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        crit_mask = mask << crit_shift;
        mask |= crit_mask | IDR_EP;
    }

    src->idr = val & mask;
    DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr);

    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
        if (src->idr & crit_mask) {
            if (src->idr & normal_mask) {
                DPRINTF("%s: IRQ configured for multiple output types, using "
                        "critical", __func__);
            }

            src->output = OPENPIC_OUTPUT_CINT;
            src->nomask = true;
            src->destmask = 0;

            for (i = 0; i < opp->nb_cpus; i++) {
                int n_ci = IDR_CI0_SHIFT - i;

                if (src->idr & (1UL << n_ci)) {
                    src->destmask |= 1UL << i;
                }
            }
        } else {
            src->output = OPENPIC_OUTPUT_INT;
            src->nomask = false;
            src->destmask = src->idr & normal_mask;
        }
    } else {
        src->destmask = src->idr;
    }
}
static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    if (opp->flags & OPENPIC_FLAG_ILR) {
        IRQSource *src = &opp->src[n_IRQ];

        src->output = inttgt_to_output(val & ILR_INTTGT_MASK);
        DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, src->idr,
                src->output);

        /* TODO: on MPIC v4.0 only, set nomask for non-INT */
    }
}
static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val)
{
    uint32_t mask;

    /* NOTE when implementing newer FSL MPIC models: starting with v4.0,
     * the polarity bit is read-only on internal interrupts.
     */
    mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
           IVPR_POLARITY_MASK | opp->vector_mask;

    /* ACTIVITY bit is read-only */
    opp->src[n_IRQ].ivpr =
        (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

    /* For FSL internal interrupts, the sense bit is reserved and zero,
     * and the interrupt is always level-triggered.  Timers and IPIs
     * have no sense or polarity bits, and are edge-triggered.
     */
    switch (opp->src[n_IRQ].type) {
    case IRQ_TYPE_NORMAL:
        opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
        break;

    case IRQ_TYPE_FSLINT:
        opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
        break;

    case IRQ_TYPE_FSLSPECIAL:
        opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
        break;
    }

    openpic_update_irq(opp, n_IRQ);
    DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val,
            opp->src[n_IRQ].ivpr);
}
static void openpic_gcr_write(OpenPICState *opp, uint64_t val)
{
    bool mpic_proxy = false;

    if (val & GCR_RESET) {
        openpic_reset(DEVICE(opp));
        return;
    }

    opp->gcr &= ~opp->mpic_mode_mask;
    opp->gcr |= val & opp->mpic_mode_mask;

    /* Set external proxy mode */
    if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY) {
        mpic_proxy = true;
    }

    ppce500_set_mpic_proxy(mpic_proxy);
}
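
/* Mode changes written to the GCR are mirrored to the e500 machine code via
 * ppce500_set_mpic_proxy(), so the cores can switch to external-proxy
 * interrupt delivery when GCR_MODE_PROXY is selected. */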
static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    IRQDest *dst;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);
    if (addr & 0xF) {
        return;
    }
    switch (addr) {
    case 0x00: /* Block Revision Register1 (BRR1) is Readonly */
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        openpic_cpu_write_internal(opp, addr, val, get_current_cpu());
        break;
    case 0x1000: /* FRR */
        break;
    case 0x1020: /* GCR */
        openpic_gcr_write(opp, val);
        break;
    case 0x1080: /* VIR */
        break;
    case 0x1090: /* PIR */
        for (idx = 0; idx < opp->nb_cpus; idx++) {
            if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) {
                DPRINTF("Raise OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
            } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) {
                DPRINTF("Lower OpenPIC RESET output for CPU %d", idx);
                dst = &opp->dst[idx];
                qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
            }
        }
        opp->pir = val;
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        {
            int idx;
            idx = (addr - 0x10A0) >> 4;
            write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
        }
        break;
    case 0x10E0: /* SPVE */
        opp->spve = val & opp->vector_mask;
        break;
    default:
        break;
    }
}
static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    retval = 0xFFFFFFFF;
    if (addr & 0xF) {
        return retval;
    }
    switch (addr) {
    case 0x1000: /* FRR */
        retval = opp->frr;
        break;
    case 0x1020: /* GCR */
        retval = opp->gcr;
        break;
    case 0x1080: /* VIR */
        retval = opp->vir;
        break;
    case 0x1090: /* PIR */
        retval = 0x00000000;
        break;
    case 0x00: /* Block Revision Register1 (BRR1) */
        retval = opp->brr1;
        break;
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70:
    case 0x80:
    case 0x90:
    case 0xA0:
    case 0xB0:
        retval = openpic_cpu_read_internal(opp, addr, get_current_cpu());
        break;
    case 0x10A0: /* IPI_IVPR */
    case 0x10B0:
    case 0x10C0:
    case 0x10D0:
        {
            int idx;
            idx = (addr - 0x10A0) >> 4;
            retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
        }
        break;
    case 0x10E0: /* SPVE */
        retval = opp->spve;
        break;
    default:
        break;
    }
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled);

static void qemu_timer_cb(void *opaque)
{
    OpenPICTimer *tmr = opaque;
    OpenPICState *opp = tmr->opp;
    uint32_t n_IRQ = tmr->n_IRQ;
    uint32_t val = tmr->tbcr & ~TBCR_CI;
    uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG);  /* invert toggle. */

    DPRINTF("%s n_IRQ=%d", __func__, n_IRQ);
    /* Reload current count from base count and setup timer. */
    tmr->tccr = val | tog;
    openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
    /* Raise the interrupt. */
    opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ);
    openpic_set_irq(opp, n_IRQ, 1);
    openpic_set_irq(opp, n_IRQ, 0);
}
/* If enabled is true, arranges for an interrupt to be raised val clocks into
   the future; if enabled is false, cancels the timer. */
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled)
{
    uint64_t ns = ticks_to_ns(val & ~TCCR_TOG);
    /* A count of zero causes a timer to be set to expire immediately.  This
       effectively stops the simulation since the timer is constantly expiring
       which prevents guest code execution, so we don't honor that
       configuration.  On real hardware, this situation would generate an
       interrupt on every clock cycle if the interrupt was unmasked. */
    if ((ns == 0) || !enabled) {
        tmr->qemu_timer_active = false;
        tmr->tccr = tmr->tccr & TCCR_TOG;
        timer_del(tmr->qemu_timer); /* set timer to never expire. */
    } else {
        tmr->qemu_timer_active = true;
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        tmr->origin_time = now;
        timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */
    }
}
/* Returns the current tccr value, i.e., timer value (in clocks) with
   appropriate TOG. */
static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr)
{
    uint64_t retval;

    if (!tmr->qemu_timer_active) {
        retval = tmr->tccr;
    } else {
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        uint64_t used = now - tmr->origin_time;  /* nsecs */
        uint32_t used_ticks = (uint32_t)ns_to_ticks(used);
        uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks;
        retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG));
    }

    return retval;
}
static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, (addr + 0x10f0), val);
    if (addr & 0xF) {
        return;
    }

    if (addr == 0) {
        /* TFRR */
        opp->tfrr = val;
        return;
    }
    addr -= 0x10; /* correct for TFRR */
    idx = (addr >> 6) & 0x3;

    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        break;
    case 0x10: /* TBCR */
        /* Did the enable status change? */
        if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) {
            /* Did "Count Inhibit" transition from 1 to 0? */
            if ((val & TBCR_CI) == 0) {
                opp->timers[idx].tccr = val & ~TCCR_TOG;
            }
            openpic_tmr_set_tmr(&opp->timers[idx],
                                (val & ~TBCR_CI),
                                /*enabled=*/((val & TBCR_CI) == 0));
        }
        opp->timers[idx].tbcr = val;
        break;
    case 0x20: /* TVPR */
        write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
        break;
    case 0x30: /* TDR */
        write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
        break;
    }
}
static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval = -1;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0);
    if (addr & 0xF) {
        goto out;
    }
    if (addr == 0) {
        /* TFRR */
        retval = opp->tfrr;
        goto out;
    }
    addr -= 0x10; /* correct for TFRR */
    idx = (addr >> 6) & 0x3;
    switch (addr & 0x30) {
    case 0x00: /* TCCR */
        retval = openpic_tmr_get_timer(&opp->timers[idx]);
        break;
    case 0x10: /* TBCR */
        retval = opp->timers[idx].tbcr;
        break;
    case 0x20: /* TVPR */
        retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
        break;
    case 0x30: /* TDR */
        retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
        break;
    }

out:
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}
static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
            __func__, addr, val);

    addr = addr & 0xffff;
    idx = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        write_IRQreg_ivpr(opp, idx, val);
        break;
    case 0x10:
        write_IRQreg_idr(opp, idx, val);
        break;
    case 0x18:
        write_IRQreg_ilr(opp, idx, val);
        break;
    }
}
static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
{
    OpenPICState *opp = opaque;
    uint32_t retval;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    retval = 0xFFFFFFFF;

    addr = addr & 0xffff;
    idx = addr >> 5;

    switch (addr & 0x1f) {
    case 0x00:
        retval = read_IRQreg_ivpr(opp, idx);
        break;
    case 0x10:
        retval = read_IRQreg_idr(opp, idx);
        break;
    case 0x18:
        retval = read_IRQreg_ilr(opp, idx);
        break;
    }

    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}
static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned size)
{
    OpenPICState *opp = opaque;
    int idx = opp->irq_msi;
    int srs, ibs;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);

    switch (addr) {
    case MSIIR_OFFSET:
        srs = val >> MSIIR_SRS_SHIFT;
        idx += srs;
        ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
        opp->msi[srs].msir |= 1 << ibs;
        openpic_set_irq(opp, idx, 1);
        break;
    default:
        /* most registers are read-only, thus ignored */
        break;
    }
}
static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    OpenPICState *opp = opaque;
    uint64_t r = 0;
    int i, srs;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
    srs = addr >> 4;

    switch (addr) {
    case 0x00:
    case 0x10:
    case 0x20:
    case 0x30:
    case 0x40:
    case 0x50:
    case 0x60:
    case 0x70: /* MSIRs */
        r = opp->msi[srs].msir;
        /* Clear on read */
        opp->msi[srs].msir = 0;
        openpic_set_irq(opp, opp->irq_msi + srs, 0);
        break;
    case 0x120: /* MSISR */
        for (i = 0; i < MAX_MSI; i++) {
            r |= (opp->msi[i].msir ? 1 : 0) << i;
        }
        break;
    }

    return r;
}
static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t r = 0;

    DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);

    /* TODO: EISR/EIMR */
    return r;
}

static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
            __func__, addr, val);

    /* TODO: EISR/EIMR */
}
static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
                                       uint32_t val, int idx)
{
    OpenPICState *opp = opaque;
    IRQSource *src;
    IRQDest *dst;
    int s_IRQ, n_IRQ;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx,
            addr, val);

    if (idx < 0 || idx >= opp->nb_cpus) {
        return;
    }

    if (addr & 0xF) {
        return;
    }
    dst = &opp->dst[idx];
    addr &= 0xFF0;
    switch (addr) {
    case 0x40: /* IPIDR */
    case 0x50:
    case 0x60:
    case 0x70:
        idx = (addr - 0x40) >> 4;
        /* we use IDE as mask which CPUs to deliver the IPI to still. */
        opp->src[opp->irq_ipi0 + idx].destmask |= val;
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
        openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
        break;
    case 0x80: /* CTPR */
        dst->ctpr = val & 0x0000000F;

        DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d",
                __func__, idx, dst->ctpr, dst->raised.priority,
                dst->servicing.priority);

        if (dst->raised.priority <= dst->ctpr) {
            DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr",
                    __func__, idx);
            qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
        } else if (dst->raised.priority > dst->servicing.priority) {
            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d",
                    __func__, idx, dst->raised.next);
            qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]);
        }

        break;
    case 0x90: /* WHOAMI */
        /* Read-only register */
        break;
    case 0xA0: /* IACK */
        /* Read-only register */
        break;
    case 0xB0: /* EOI */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);

        if (s_IRQ < 0) {
            DPRINTF("%s: EOI with no interrupt in service", __func__);
            break;
        }

        IRQ_resetbit(&dst->servicing, s_IRQ);
        /* Set up next servicing IRQ */
        s_IRQ = IRQ_get_next(opp, &dst->servicing);
        /* Check queued interrupts. */
        n_IRQ = IRQ_get_next(opp, &dst->raised);
        src = &opp->src[n_IRQ];
        if (n_IRQ != -1 &&
            (s_IRQ == -1 ||
             IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
            DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
                    idx, n_IRQ);
            qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
        }
        break;
    default:
        break;
    }
}
static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12);
}

static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
{
    IRQSource *src;
    int retval, irq;

    DPRINTF("Lower OpenPIC INT output");
    qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);

    irq = IRQ_get_next(opp, &dst->raised);
    DPRINTF("IACK: irq=%d", irq);

    if (irq == -1) {
        /* No more interrupt pending */
        return opp->spve;
    }

    src = &opp->src[irq];
    if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
            !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
        error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x",
                     __func__, irq, dst->ctpr, src->ivpr);
        openpic_update_irq(opp, irq);
        retval = opp->spve;
    } else {
        /* IRQ enter servicing state */
        IRQ_setbit(&dst->servicing, irq);
        retval = IVPR_VECTOR(opp, src->ivpr);
    }

    if (!src->level) {
        /* edge-sensitive IRQ */
        src->ivpr &= ~IVPR_ACTIVITY_MASK;
        src->pending = 0;
        IRQ_resetbit(&dst->raised, irq);
    }

    /* Timers and IPIs support multicast. */
    if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
        ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
        DPRINTF("irq is IPI or TMR");
        src->destmask &= ~(1 << cpu);
        if (src->destmask && !src->level) {
            /* trigger on CPUs that didn't know about it yet */
            openpic_set_irq(opp, irq, 1);
            openpic_set_irq(opp, irq, 0);
            /* if all CPUs knew about it, set active bit again */
            src->ivpr |= IVPR_ACTIVITY_MASK;
        }
    }

    return retval;
}

static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
                                          int idx)
{
    OpenPICState *opp = opaque;
    IRQDest *dst;
    uint32_t retval;

    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr);
    retval = 0xFFFFFFFF;

    if (idx < 0 || idx >= opp->nb_cpus) {
        return retval;
    }

    if (addr & 0xF) {
        return retval;
    }
    dst = &opp->dst[idx];
    addr &= 0xFF0;
    switch (addr) {
    case 0x80: /* CTPR */
        retval = dst->ctpr;
        break;
    case 0x90: /* WHOAMI */
        retval = idx;
        break;
    case 0xA0: /* IACK */
        retval = openpic_iack(opp, dst, idx);
        break;
    case 0xB0: /* EOI */
        retval = 0;
        break;
    default:
        break;
    }
    DPRINTF("%s: => 0x%08x", __func__, retval);

    return retval;
}

static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len)
{
    return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12);
}
static const MemoryRegionOps openpic_glb_ops_le = {
    .write = openpic_gbl_write,
    .read  = openpic_gbl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_glb_ops_be = {
    .write = openpic_gbl_write,
    .read  = openpic_gbl_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_le = {
    .write = openpic_tmr_write,
    .read  = openpic_tmr_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_tmr_ops_be = {
    .write = openpic_tmr_write,
    .read  = openpic_tmr_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_le = {
    .write = openpic_cpu_write,
    .read  = openpic_cpu_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_cpu_ops_be = {
    .write = openpic_cpu_write,
    .read  = openpic_cpu_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_src_ops_le = {
    .write = openpic_src_write,
    .read  = openpic_src_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_src_ops_be = {
    .write = openpic_src_write,
    .read  = openpic_src_read,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_msi_ops_be = {
    .read = openpic_msi_read,
    .write = openpic_msi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps openpic_summary_ops_be = {
    .read = openpic_summary_read,
    .write = openpic_summary_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
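
/* Two flavours of each register block are defined: the little-endian ops
 * are mapped for the Raven and KeyLargo models and the big-endian ops for
 * the Freescale MPIC models; openpic_realize() picks the matching list
 * (list_le/list_be, plus list_fsl for the MSI and summary blocks). */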
static void openpic_reset(DeviceState *d)
{
    OpenPICState *opp = OPENPIC(d);
    int i;

    opp->gcr = GCR_RESET;
    /* Initialise controller registers */
    opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
               ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) |
               (opp->vid << FRR_VID_SHIFT);

    opp->pir = 0;
    opp->spve = -1 & opp->vector_mask;
    opp->tfrr = opp->tfrr_reset;
    /* Initialise IRQ sources */
    for (i = 0; i < opp->max_irq; i++) {
        opp->src[i].ivpr = opp->ivpr_reset;
        switch (opp->src[i].type) {
        case IRQ_TYPE_NORMAL:
            opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK);
            break;

        case IRQ_TYPE_FSLINT:
            opp->src[i].ivpr |= IVPR_POLARITY_MASK;
            break;

        case IRQ_TYPE_FSLSPECIAL:
            break;
        }

        write_IRQreg_idr(opp, i, opp->idr_reset);
    }
    /* Initialise IRQ destinations */
    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].ctpr = 15;
        opp->dst[i].raised.next = -1;
        opp->dst[i].raised.priority = 0;
        bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.next = -1;
        opp->dst[i].servicing.priority = 0;
        bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS);
    }
    /* Initialise timers */
    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].tccr = 0;
        opp->timers[i].tbcr = TBCR_CI;
        if (opp->timers[i].qemu_timer_active) {
            timer_del(opp->timers[i].qemu_timer);  /* Inhibit timer */
            opp->timers[i].qemu_timer_active = false;
        }
    }
    /* Go out of RESET state */
    opp->gcr = 0;
}
typedef struct MemReg {
    const char             *name;
    MemoryRegionOps const  *ops;
    hwaddr                  start_addr;
    ram_addr_t              size;
} MemReg;
static void fsl_common_init(OpenPICState *opp)
{
    int i;
    int virq = OPENPIC_MAX_SRC;

    opp->vid = VID_REVISION_1_2;
    opp->vir = VIR_GENERIC;
    opp->vector_mask = 0xFFFF;
    opp->tfrr_reset = 0;
    opp->ivpr_reset = IVPR_MASK_MASK;
    opp->idr_reset = 1 << 0;
    opp->max_irq = OPENPIC_MAX_IRQ;

    opp->irq_ipi0 = virq;
    virq += OPENPIC_MAX_IPI;
    opp->irq_tim0 = virq;
    virq += OPENPIC_MAX_TMR;

    assert(virq <= OPENPIC_MAX_IRQ);

    opp->irq_msi = 224;

    msi_nonbroken = true;
    for (i = 0; i < opp->fsl->max_ext; i++) {
        opp->src[i].level = false;
    }

    /* Internal interrupts, including message and MSI */
    for (i = 16; i < OPENPIC_MAX_SRC; i++) {
        opp->src[i].type = IRQ_TYPE_FSLINT;
        opp->src[i].level = true;
    }

    /* timers and IPIs */
    for (i = OPENPIC_MAX_SRC; i < virq; i++) {
        opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
        opp->src[i].level = false;
    }

    for (i = 0; i < OPENPIC_MAX_TMR; i++) {
        opp->timers[i].n_IRQ = opp->irq_tim0 + i;
        opp->timers[i].qemu_timer_active = false;
        opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                 &qemu_timer_cb,
                                                 &opp->timers[i]);
        opp->timers[i].opp = opp;
    }
}
*opp
, const MemReg
*list
, int *count
)
1359 while (list
->name
) {
1360 assert(*count
< ARRAY_SIZE(opp
->sub_io_mem
));
1362 memory_region_init_io(&opp
->sub_io_mem
[*count
], OBJECT(opp
), list
->ops
,
1363 opp
, list
->name
, list
->size
);
1365 memory_region_add_subregion(&opp
->mem
, list
->start_addr
,
1366 &opp
->sub_io_mem
[*count
]);
1373 static const VMStateDescription vmstate_openpic_irq_queue
= {
1374 .name
= "openpic_irq_queue",
1376 .minimum_version_id
= 0,
1377 .fields
= (VMStateField
[]) {
1378 VMSTATE_BITMAP(queue
, IRQQueue
, 0, queue_size
),
1379 VMSTATE_INT32(next
, IRQQueue
),
1380 VMSTATE_INT32(priority
, IRQQueue
),
1381 VMSTATE_END_OF_LIST()
1385 static const VMStateDescription vmstate_openpic_irqdest
= {
1386 .name
= "openpic_irqdest",
1388 .minimum_version_id
= 0,
1389 .fields
= (VMStateField
[]) {
1390 VMSTATE_INT32(ctpr
, IRQDest
),
1391 VMSTATE_STRUCT(raised
, IRQDest
, 0, vmstate_openpic_irq_queue
,
1393 VMSTATE_STRUCT(servicing
, IRQDest
, 0, vmstate_openpic_irq_queue
,
1395 VMSTATE_UINT32_ARRAY(outputs_active
, IRQDest
, OPENPIC_OUTPUT_NB
),
1396 VMSTATE_END_OF_LIST()
1400 static const VMStateDescription vmstate_openpic_irqsource
= {
1401 .name
= "openpic_irqsource",
1403 .minimum_version_id
= 0,
1404 .fields
= (VMStateField
[]) {
1405 VMSTATE_UINT32(ivpr
, IRQSource
),
1406 VMSTATE_UINT32(idr
, IRQSource
),
1407 VMSTATE_UINT32(destmask
, IRQSource
),
1408 VMSTATE_INT32(last_cpu
, IRQSource
),
1409 VMSTATE_INT32(pending
, IRQSource
),
1410 VMSTATE_END_OF_LIST()
1414 static const VMStateDescription vmstate_openpic_timer
= {
1415 .name
= "openpic_timer",
1417 .minimum_version_id
= 0,
1418 .fields
= (VMStateField
[]) {
1419 VMSTATE_UINT32(tccr
, OpenPICTimer
),
1420 VMSTATE_UINT32(tbcr
, OpenPICTimer
),
1421 VMSTATE_END_OF_LIST()
1425 static const VMStateDescription vmstate_openpic_msi
= {
1426 .name
= "openpic_msi",
1428 .minimum_version_id
= 0,
1429 .fields
= (VMStateField
[]) {
1430 VMSTATE_UINT32(msir
, OpenPICMSI
),
1431 VMSTATE_END_OF_LIST()
1435 static int openpic_post_load(void *opaque
, int version_id
)
1437 OpenPICState
*opp
= (OpenPICState
*)opaque
;
1440 /* Update internal ivpr and idr variables */
1441 for (i
= 0; i
< opp
->max_irq
; i
++) {
1442 write_IRQreg_idr(opp
, i
, opp
->src
[i
].idr
);
1443 write_IRQreg_ivpr(opp
, i
, opp
->src
[i
].ivpr
);
1449 static const VMStateDescription vmstate_openpic
= {
1452 .minimum_version_id
= 3,
1453 .post_load
= openpic_post_load
,
1454 .fields
= (VMStateField
[]) {
1455 VMSTATE_UINT32(gcr
, OpenPICState
),
1456 VMSTATE_UINT32(vir
, OpenPICState
),
1457 VMSTATE_UINT32(pir
, OpenPICState
),
1458 VMSTATE_UINT32(spve
, OpenPICState
),
1459 VMSTATE_UINT32(tfrr
, OpenPICState
),
1460 VMSTATE_UINT32(max_irq
, OpenPICState
),
1461 VMSTATE_STRUCT_VARRAY_UINT32(src
, OpenPICState
, max_irq
, 0,
1462 vmstate_openpic_irqsource
, IRQSource
),
1463 VMSTATE_UINT32_EQUAL(nb_cpus
, OpenPICState
, NULL
),
1464 VMSTATE_STRUCT_VARRAY_UINT32(dst
, OpenPICState
, nb_cpus
, 0,
1465 vmstate_openpic_irqdest
, IRQDest
),
1466 VMSTATE_STRUCT_ARRAY(timers
, OpenPICState
, OPENPIC_MAX_TMR
, 0,
1467 vmstate_openpic_timer
, OpenPICTimer
),
1468 VMSTATE_STRUCT_ARRAY(msi
, OpenPICState
, MAX_MSI
, 0,
1469 vmstate_openpic_msi
, OpenPICMSI
),
1470 VMSTATE_UINT32(irq_ipi0
, OpenPICState
),
1471 VMSTATE_UINT32(irq_tim0
, OpenPICState
),
1472 VMSTATE_UINT32(irq_msi
, OpenPICState
),
1473 VMSTATE_END_OF_LIST()
1477 static void openpic_init(Object
*obj
)
1479 OpenPICState
*opp
= OPENPIC(obj
);
1481 memory_region_init(&opp
->mem
, obj
, "openpic", 0x40000);
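
/* openpic_realize() applies per-model defaults, maps the register blocks
 * selected above, and exposes one set of output lines (INT/CINT/MCK/...)
 * per CPU as sysbus IRQs. */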
static void openpic_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *d = SYS_BUS_DEVICE(dev);
    OpenPICState *opp = OPENPIC(dev);
    int i, j;
    int list_count = 0;
    static const MemReg list_le[] = {
        {"glb", &openpic_glb_ops_le,
                OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_le,
                OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_le,
                OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_le,
                OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    static const MemReg list_be[] = {
        {"glb", &openpic_glb_ops_be,
                OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE},
        {"tmr", &openpic_tmr_ops_be,
                OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE},
        {"src", &openpic_src_ops_be,
                OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE},
        {"cpu", &openpic_cpu_ops_be,
                OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE},
        {NULL}
    };
    static const MemReg list_fsl[] = {
        {"msi", &openpic_msi_ops_be,
                OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE},
        {"summary", &openpic_summary_ops_be,
                OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE},
        {NULL}
    };

    if (opp->nb_cpus > MAX_CPU) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
                   TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
                   (uint64_t)0, (uint64_t)MAX_CPU);
        return;
    }

    switch (opp->model) {
    case OPENPIC_MODEL_FSL_MPIC_20:
    default:
        opp->fsl = &fsl_mpic_20;
        opp->brr1 = 0x00400200;
        opp->flags |= OPENPIC_FLAG_IDR_CRIT;
        opp->nb_irqs = 80;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_FSL_MPIC_42:
        opp->fsl = &fsl_mpic_42;
        opp->brr1 = 0x00400402;
        opp->flags |= OPENPIC_FLAG_ILR;
        opp->nb_irqs = 196;
        opp->mpic_mode_mask = GCR_MODE_PROXY;

        fsl_common_init(opp);
        map_list(opp, list_be, &list_count);
        map_list(opp, list_fsl, &list_count);

        break;

    case OPENPIC_MODEL_RAVEN:
        opp->nb_irqs = RAVEN_MAX_EXT;
        opp->vid = VID_REVISION_1_3;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = RAVEN_MAX_IRQ;
        opp->irq_ipi0 = RAVEN_IPI_IRQ;
        opp->irq_tim0 = RAVEN_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }

        map_list(opp, list_le, &list_count);
        break;

    case OPENPIC_MODEL_KEYLARGO:
        opp->nb_irqs = KEYLARGO_MAX_EXT;
        opp->vid = VID_REVISION_1_2;
        opp->vir = VIR_GENERIC;
        opp->vector_mask = 0xFF;
        opp->tfrr_reset = 4160000;
        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK;
        opp->idr_reset = 0;
        opp->max_irq = KEYLARGO_MAX_IRQ;
        opp->irq_ipi0 = KEYLARGO_IPI_IRQ;
        opp->irq_tim0 = KEYLARGO_TMR_IRQ;
        opp->brr1 = -1;
        opp->mpic_mode_mask = GCR_MODE_MIXED;

        if (opp->nb_cpus != 1) {
            error_setg(errp, "Only UP supported today");
            return;
        }

        map_list(opp, list_le, &list_count);
        break;
    }

    for (i = 0; i < opp->nb_cpus; i++) {
        opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB);
        for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
            sysbus_init_irq(d, &opp->dst[i].irqs[j]);
        }

        opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
        opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS;
        opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS);
    }

    sysbus_init_mmio(d, &opp->mem);
    qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq);
}
static Property openpic_properties[] = {
    DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20),
    DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1),
    DEFINE_PROP_END_OF_LIST(),
};
static void openpic_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = openpic_realize;
    dc->props = openpic_properties;
    dc->reset = openpic_reset;
    dc->vmsd = &vmstate_openpic;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
= {
1634 .name
= TYPE_OPENPIC
,
1635 .parent
= TYPE_SYS_BUS_DEVICE
,
1636 .instance_size
= sizeof(OpenPICState
),
1637 .instance_init
= openpic_init
,
1638 .class_init
= openpic_class_init
,
1641 static void openpic_register_types(void)
1643 type_register_static(&openpic_info
);
1646 type_init(openpic_register_types
)