/*
 * ARM Generic/Distributed Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

/* This file contains implementation code for the RealView EB interrupt
 * controller, MPCore distributed interrupt controller and ARMv7-M
 * Nested Vectored Interrupt Controller.
 * It is compiled in two ways:
 *  (1) as a standalone file to produce a sysbus device which is a GIC
 *  that can be used on the realview board and as one of the builtin
 *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
 *  (2) by being directly #included into armv7m_nvic.c to produce the
 *  armv7m_nvic device.
 */

#include "hw/sysbus.h"
#include "gic_internal.h"

//#define DEBUG_GIC

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

static const uint8_t gic_id[] = {
    0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

#define NUM_CPU(s) ((s)->num_cpu)

static inline int gic_get_current_cpu(GICState *s)
{
    if (s->num_cpu > 1) {
        return current_cpu->cpu_index;
    }
    return 0;
}

/* TODO: Many places that call this routine could be optimized.  */
/* Update interrupt status after enabled or pending bits have been changed.  */
void gic_update(GICState *s)
{
    int best_irq;
    int best_prio;
    int irq;
    int level;
    int cpu;
    int cm;

    for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
        cm = 1 << cpu;
        s->current_pending[cpu] = 1023;
        if (!s->enabled || !s->cpu_enabled[cpu]) {
            qemu_irq_lower(s->parent_irq[cpu]);
            return;
        }
        best_prio = 0x100;
        best_irq = 1023;
        for (irq = 0; irq < s->num_irq; irq++) {
            if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
                (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) {
                if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
                    best_prio = GIC_GET_PRIORITY(irq, cpu);
                    best_irq = irq;
                }
            }
        }
        level = 0;
        if (best_prio < s->priority_mask[cpu]) {
            s->current_pending[cpu] = best_irq;
            if (best_prio < s->running_priority[cpu]) {
                DPRINTF("Raised pending IRQ %d (cpu %d)\n", best_irq, cpu);
                level = 1;
            }
        }
        qemu_set_irq(s->parent_irq[cpu], level);
    }
}

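/* Worked example of the selection above (illustrative values only): with
 * priority_mask[cpu] == 0xf0 and running_priority[cpu] == 0x80, an enabled,
 * pending interrupt at priority 0x40 that targets this CPU becomes
 * current_pending[cpu] and raises parent_irq, since 0x40 is below both the
 * mask and the running priority; one at 0xa0 becomes current_pending[cpu]
 * but does not preempt, and one at 0xf8 is masked out entirely.
 */
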
void gic_set_pending_private(GICState *s, int cpu, int irq)
{
    int cm = 1 << cpu;

    if (gic_test_pending(s, irq, cm)) {
        return;
    }

    DPRINTF("Set %d pending cpu %d\n", irq, cpu);
    GIC_SET_PENDING(irq, cm);
    gic_update(s);
}

static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
                                 int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
            DPRINTF("Set %d pending mask %x\n", irq, target);
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}

static void gic_set_irq_generic(GICState *s, int irq, int level,
                                int cm, int target)
{
    if (level) {
        GIC_SET_LEVEL(irq, cm);
        DPRINTF("Set %d pending mask %x\n", irq, target);
        if (GIC_TEST_EDGE_TRIGGER(irq)) {
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
}

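/* Note the difference between the two helpers above: the 11MPCore/NVIC
 * variant latches a rising level-triggered input as pending whenever the
 * interrupt is enabled, whereas the generic (GICv1-style) variant only
 * latches edge-triggered inputs and leaves it to gic_test_pending() to
 * derive the pending state of level-triggered interrupts from the current
 * line level.
 */
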
/* Process a change in an external IRQ input.  */
static void gic_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     */
    GICState *s = (GICState *)opaque;
    int cm, target;

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* The first external input line is internal interrupt 32.  */
        cm = ALL_CPU_MASK;
        irq += GIC_INTERNAL;
        target = GIC_TARGET(irq);
    } else {
        int cpu;
        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        cm = 1 << cpu;
        target = cm;
    }

    assert(irq >= GIC_NR_SGIS);

    if (level == GIC_TEST_LEVEL(irq, cm)) {
        return;
    }

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        gic_set_irq_11mpcore(s, irq, level, cm, target);
    } else {
        gic_set_irq_generic(s, irq, level, cm, target);
    }

    gic_update(s);
}

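/* Example of the line numbering handled above (illustrative only): for a
 * GIC with num_irq == 96 there are N == 64 external lines, so GPIO line 10
 * becomes SPI 42 (10 + GIC_INTERNAL) targeted according to GICD_ITARGETSRn,
 * while GPIO line 64 + 29 == 93 becomes PPI 29 on CPU 0 with a single-CPU
 * target mask.
 */
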
static void gic_set_running_irq(GICState *s, int cpu, int irq)
{
    s->running_irq[cpu] = irq;
    if (irq == 1023) {
        s->running_priority[cpu] = 0x100;
    } else {
        s->running_priority[cpu] = GIC_GET_PRIORITY(irq, cpu);
    }
    gic_update(s);
}

uint32_t gic_acknowledge_irq(GICState *s, int cpu)
{
    int ret, irq, src;
    int cm = 1 << cpu;
    irq = s->current_pending[cpu];
    if (irq == 1023
            || GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK no pending IRQ\n");
        return 1023;
    }
    s->last_active[irq][cpu] = s->running_irq[cpu];

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Clear pending flags for both level and edge triggered interrupts.
         * Level triggered IRQs will be reasserted once they become inactive.
         */
        GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
        ret = irq;
    } else {
        if (irq < GIC_NR_SGIS) {
            /* Lookup the source CPU for the SGI and clear this in the
             * sgi_pending map. Return the src and clear the overall pending
             * state on this CPU if the SGI is not pending from any CPUs.
             */
            assert(s->sgi_pending[irq][cpu] != 0);
            src = ctz32(s->sgi_pending[irq][cpu]);
            s->sgi_pending[irq][cpu] &= ~(1 << src);
            if (s->sgi_pending[irq][cpu] == 0) {
                GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            }
            ret = irq | ((src & 0x7) << 10);
        } else {
            /* Clear pending state for both level and edge triggered
             * interrupts. (level triggered interrupts with an active line
             * remain pending, see gic_test_pending)
             */
            GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            ret = irq;
        }
    }

    gic_set_running_irq(s, cpu, irq);
    DPRINTF("ACK %d\n", irq);
    return ret;
}

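/* For SGIs the value returned above follows the GICC_IAR layout: bits [9:0]
 * hold the interrupt ID and bits [12:10] the requesting CPU, which is why
 * the SGI path assembles "irq | ((src & 0x7) << 10)" from the sgi_pending
 * bookkeeping instead of returning the bare interrupt number.
 */
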
void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val)
{
    if (irq < GIC_INTERNAL) {
        s->priority1[irq][cpu] = val;
    } else {
        s->priority2[(irq) - GIC_INTERNAL] = val;
    }
}

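/* Priorities for the banked per-CPU interrupts (SGIs and PPIs, IDs 0..31)
 * live in priority1[][], indexed by CPU, while the shared SPIs use the
 * single priority2[] array; GIC_GET_PRIORITY() picks the right one on the
 * read side.
 */
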
void gic_complete_irq(GICState *s, int cpu, int irq)
{
    int update = 0;
    int cm = 1 << cpu;
    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */

    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Mark level triggered interrupts as pending if they are still
           raised.  */
        if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
            DPRINTF("Set %d pending mask %x\n", irq, cm);
            GIC_SET_PENDING(irq, cm);
            update = 1;
        }
    }

    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running.  */
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
                break;
            }
            tmp = s->last_active[tmp][cpu];
        }
        if (update) {
            gic_update(s);
        }
    } else {
        /* Complete the current running IRQ.  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
    }
}

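/* The last_active[][] entries used above behave like a per-CPU chain of
 * preempted interrupts: acknowledging an interrupt links the previously
 * running IRQ behind it, completing the running IRQ pops the chain, and
 * completing an IRQ that sits further down simply unlinks it without
 * changing the current running priority.
 */
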
static uint32_t gic_dist_readb(void *opaque, hwaddr offset)
{
    GICState *s = (GICState *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu(s);
    cm = 1 << cpu;
    if (offset < 0x100) {
        if (offset == 0)
            return s->enabled;
        if (offset == 4)
            return ((s->num_irq / 32) - 1) | ((NUM_CPU(s) - 1) << 5);
        if (offset < 0x08)
            return 0;
        if (offset >= 0x80) {
            /* Interrupt Security, RAZ/WI */
            return 0;
        }
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (gic_test_pending(s, irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = GIC_GET_PRIORITY(irq, cpu);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
            /* For uniprocessor GICs these RAZ/WI */
            res = 0;
        } else {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq >= 29 && irq <= 31) {
                res = cm;
            } else {
                res = GIC_TARGET(irq);
            }
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_EDGE_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
    } else if (offset < 0xf10) {
        goto bad_reg;
    } else if (offset < 0xf30) {
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }

        if (offset < 0xf20) {
            /* GICD_CPENDSGIRn */
            irq = (offset - 0xf10);
        } else {
            irq = (offset - 0xf20);
            /* GICD_SPENDSGIRn */
        }

        res = s->sgi_pending[irq][cpu];
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}

static uint32_t gic_dist_readw(void *opaque, hwaddr offset)
{
    uint32_t val;
    val = gic_dist_readb(opaque, offset);
    val |= gic_dist_readb(opaque, offset + 1) << 8;
    return val;
}

static uint32_t gic_dist_readl(void *opaque, hwaddr offset)
{
    uint32_t val;
    val = gic_dist_readw(opaque, offset);
    val |= gic_dist_readw(opaque, offset + 2) << 16;
    return val;
}

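/* The halfword and word accessors above are composed from byte reads, so the
 * distributor only needs byte semantics on the read side; the one register
 * that genuinely requires 32-bit access, GICD_SGIR at 0xf00, is write-only
 * and is special-cased in gic_dist_writel() below.
 */
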
static void gic_dist_writeb(void *opaque, hwaddr offset,
                            uint32_t value)
{
    GICState *s = (GICState *)opaque;
    int irq;
    int i;
    int cpu;

    cpu = gic_get_current_cpu(s);
    if (offset < 0x100) {
        if (offset == 0) {
            s->enabled = (value & 1);
            DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis");
        } else if (offset < 4) {
            /* ignored.  */
        } else if (offset >= 0x80) {
            /* Interrupt Security Registers, RAZ/WI */
        } else {
            goto bad_reg;
        }
    } else if (offset < 0x180) {
        /* Interrupt Set Enable.  */
        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0xff;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int mask =
                    (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i);
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (!GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Enabled IRQ %d\n", irq + i);
                }
                GIC_SET_ENABLED(irq + i, cm);
                /* If a raised level triggered IRQ enabled then mark
                   it as pending.  */
                if (GIC_TEST_LEVEL(irq + i, mask)
                        && !GIC_TEST_EDGE_TRIGGER(irq + i)) {
                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
                    GIC_SET_PENDING(irq + i, mask);
                }
            }
        }
    } else if (offset < 0x200) {
        /* Interrupt Clear Enable.  */
        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Disabled IRQ %d\n", irq + i);
                }
                GIC_CLEAR_ENABLED(irq + i, cm);
            }
        }
    } else if (offset < 0x280) {
        /* Interrupt Set Pending.  */
        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Clear Pending.  */
        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS) {
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            /* ??? This currently clears the pending bit for all CPUs, even
               for per-CPU interrupts.  It's unclear whether this is the
               correct behavior.  */
            if (value & (1 << i)) {
                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        goto bad_reg;
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        gic_set_priority(s, cpu, irq, value);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
         * annoying exception of the 11MPCore's GIC.
         */
        if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq < 29) {
                value = 0;
            } else if (irq < GIC_INTERNAL) {
                value = ALL_CPU_MASK;
            }
            s->irq_target[irq] = value & ALL_CPU_MASK;
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_NR_SGIS)
            value |= 0xaa;
        for (i = 0; i < 4; i++) {
            if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
                if (value & (1 << (i * 2))) {
                    GIC_SET_MODEL(irq + i);
                } else {
                    GIC_CLEAR_MODEL(irq + i);
                }
            }
            if (value & (2 << (i * 2))) {
                GIC_SET_EDGE_TRIGGER(irq + i);
            } else {
                GIC_CLEAR_EDGE_TRIGGER(irq + i);
            }
        }
    } else if (offset < 0xf10) {
        /* 0xf00 is only handled for 32-bit writes.  */
        goto bad_reg;
    } else if (offset < 0xf20) {
        /* GICD_CPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf10);

        s->sgi_pending[irq][cpu] &= ~value;
        if (s->sgi_pending[irq][cpu] == 0) {
            GIC_CLEAR_PENDING(irq, 1 << cpu);
        }
    } else if (offset < 0xf30) {
        /* GICD_SPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
            goto bad_reg;
        }
        irq = (offset - 0xf20);

        GIC_SET_PENDING(irq, 1 << cpu);
        s->sgi_pending[irq][cpu] |= value;
    } else {
        goto bad_reg;
    }
    gic_update(s);
    return;
bad_reg:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_writeb: Bad offset %x\n", (int)offset);
}

static void gic_dist_writew(void *opaque, hwaddr offset,
                            uint32_t value)
{
    gic_dist_writeb(opaque, offset, value & 0xff);
    gic_dist_writeb(opaque, offset + 1, value >> 8);
}

static void gic_dist_writel(void *opaque, hwaddr offset,
                            uint32_t value)
{
    GICState *s = (GICState *)opaque;
    if (offset == 0xf00) {
        int cpu;
        int irq;
        int mask;
        int target_cpu;

        cpu = gic_get_current_cpu(s);
        irq = value & 0x3ff;
        switch ((value >> 24) & 3) {
        case 0:
            mask = (value >> 16) & ALL_CPU_MASK;
            break;
        case 1:
            mask = ALL_CPU_MASK ^ (1 << cpu);
            break;
        case 2:
            mask = 1 << cpu;
            break;
        default:
            DPRINTF("Bad Soft Int target filter\n");
            mask = ALL_CPU_MASK;
            break;
        }
        GIC_SET_PENDING(irq, mask);
        target_cpu = ctz32(mask);
        while (target_cpu < GIC_NCPU) {
            s->sgi_pending[irq][target_cpu] |= (1 << cpu);
            mask &= ~(1 << target_cpu);
            target_cpu = ctz32(mask);
        }
        gic_update(s);
        return;
    }
    gic_dist_writew(opaque, offset, value & 0xffff);
    gic_dist_writew(opaque, offset + 2, value >> 16);
}

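/* Illustrative GICD_SGIR decode (values made up for the example): a write of
 * 0x00010003 performed by CPU 2 selects target-list filter 0, a CPU target
 * list of 0x01 and SGI 3, so the code above pends interrupt 3 on CPU 0 and
 * records the sender as bit (1 << 2) in s->sgi_pending[3][0], which
 * gic_acknowledge_irq() later folds into the IAR value read by CPU 0.
 */
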
static const MemoryRegionOps gic_dist_ops = {
    .old_mmio = {
        .read = { gic_dist_readb, gic_dist_readw, gic_dist_readl, },
        .write = { gic_dist_writeb, gic_dist_writew, gic_dist_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint32_t gic_cpu_read(GICState *s, int cpu, int offset)
{
    switch (offset) {
    case 0x00: /* Control */
        return s->cpu_enabled[cpu];
    case 0x04: /* Priority mask */
        return s->priority_mask[cpu];
    case 0x08: /* Binary Point */
        return s->bpr[cpu];
    case 0x0c: /* Acknowledge */
        return gic_acknowledge_irq(s, cpu);
    case 0x14: /* Running Priority */
        return s->running_priority[cpu];
    case 0x18: /* Highest Pending Interrupt */
        return s->current_pending[cpu];
    case 0x1c: /* Aliased Binary Point */
        return s->abpr[cpu];
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        return s->apr[(offset - 0xd0) / 4][cpu];
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}

static void gic_cpu_write(GICState *s, int cpu, int offset, uint32_t value)
{
    switch (offset) {
    case 0x00: /* Control */
        s->cpu_enabled[cpu] = (value & 1);
        DPRINTF("CPU %d %sabled\n", cpu, s->cpu_enabled[cpu] ? "En" : "Dis");
        break;
    case 0x04: /* Priority mask */
        s->priority_mask[cpu] = (value & 0xff);
        break;
    case 0x08: /* Binary Point */
        s->bpr[cpu] = (value & 0x7);
        break;
    case 0x10: /* End Of Interrupt */
        return gic_complete_irq(s, cpu, value & 0x3ff);
    case 0x1c: /* Aliased Binary Point */
        if (s->revision >= 2) {
            s->abpr[cpu] = (value & 0x7);
        }
        break;
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        qemu_log_mask(LOG_UNIMP, "Writing APR not implemented\n");
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_write: Bad offset %x\n", (int)offset);
        return;
    }
    gic_update(s);
}

/* Wrappers to read/write the GIC CPU interface for the current CPU */
static uint64_t gic_thiscpu_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    GICState *s = (GICState *)opaque;
    return gic_cpu_read(s, gic_get_current_cpu(s), addr);
}

static void gic_thiscpu_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    GICState *s = (GICState *)opaque;
    gic_cpu_write(s, gic_get_current_cpu(s), addr, value);
}

/* Wrappers to read/write the GIC CPU interface for a specific CPU.
 * These just decode the opaque pointer into GICState* + cpu id.
 */
static uint64_t gic_do_cpu_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_read(s, id, addr);
}

static void gic_do_cpu_write(void *opaque, hwaddr addr,
                             uint64_t value, unsigned size)
{
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    gic_cpu_write(s, id, addr, value);
}

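/* The backref[] trick above exists because memory region callbacks only get
 * a single opaque pointer: each element of s->backref points back at the
 * GICState, so "backref - s->backref" recovers the CPU index that the
 * per-CPU region was registered with in arm_gic_realize().
 */
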
static const MemoryRegionOps gic_thiscpu_ops = {
    .read = gic_thiscpu_read,
    .write = gic_thiscpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gic_cpu_ops = {
    .read = gic_do_cpu_read,
    .write = gic_do_cpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

void gic_init_irqs_and_distributor(GICState *s)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int i;

    i = s->num_irq - GIC_INTERNAL;
    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] SPIs
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    if (s->revision != REV_NVIC) {
        i += (GIC_INTERNAL * s->num_cpu);
    }
    qdev_init_gpio_in(DEVICE(s), gic_set_irq, i);
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(sbd, &s->parent_irq[i]);
    }
    memory_region_init_io(&s->iomem, OBJECT(s), &gic_dist_ops, s,
                          "gic_dist", 0x1000);
}

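/* A board or SoC model is expected to wire these lines up with the generic
 * qdev/sysbus helpers. A minimal sketch, with hypothetical device pointers
 * and interrupt numbers (not taken from any real board file):
 *
 *   DeviceState *gicdev = ...;   // the realized TYPE_ARM_GIC device
 *   sysbus_connect_irq(SYS_BUS_DEVICE(gicdev), 0,
 *                      qdev_get_gpio_in(DEVICE(cpu0), ARM_CPU_IRQ));
 *   // GPIO line 10 becomes interrupt ID 10 + GIC_INTERNAL == 42 (an SPI)
 *   sysbus_connect_irq(SYS_BUS_DEVICE(somedev), 0,
 *                      qdev_get_gpio_in(gicdev, 10));
 */
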
static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    int i;
    GICState *s = ARM_GIC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    ARMGICClass *agc = ARM_GIC_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gic_init_irqs_and_distributor(s);

    /* Memory regions for the CPU interfaces (NVIC doesn't have these):
     * a region for "CPU interface for this core", then a region for
     * "CPU interface for core 0", "for core 1", ...
     * NB that the memory region size of 0x100 applies for the 11MPCore
     * and also cores following the GIC v1 spec (ie A9).
     * GIC v2 defines a larger memory region (0x1000) so this will need
     * to be extended when we implement A15.
     */
    memory_region_init_io(&s->cpuiomem[0], OBJECT(s), &gic_thiscpu_ops, s,
                          "gic_cpu", 0x100);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->backref[i] = s;
        memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops,
                              &s->backref[i], "gic_cpu", 0x100);
    }
    /* Distributor */
    sysbus_init_mmio(sbd, &s->iomem);
    /* cpu interfaces (one for "current cpu" plus one per cpu) */
    for (i = 0; i <= NUM_CPU(s); i++) {
        sysbus_init_mmio(sbd, &s->cpuiomem[i]);
    }
}

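/* The resulting sysbus MMIO regions are therefore: region 0 is the
 * distributor, region 1 the "current CPU" interface, and regions 2..N+1 the
 * per-CPU interfaces for cores 0..N-1; boards map whichever of these the
 * modelled hardware actually exposes.
 */
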
static void arm_gic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICClass *agc = ARM_GIC_CLASS(klass);

    agc->parent_realize = dc->realize;
    dc->realize = arm_gic_realize;
}

static const TypeInfo arm_gic_info = {
    .name = TYPE_ARM_GIC,
    .parent = TYPE_ARM_GIC_COMMON,
    .instance_size = sizeof(GICState),
    .class_init = arm_gic_class_init,
    .class_size = sizeof(ARMGICClass),
};

static void arm_gic_register_types(void)
{
    type_register_static(&arm_gic_info);
}

type_init(arm_gic_register_types)