 * ARM Generic/Distributed Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
/* This file contains implementation code for the RealView EB interrupt
 * controller, MPCore distributed interrupt controller and ARMv7-M
 * Nested Vectored Interrupt Controller.
 * It is compiled in two ways:
 *  (1) as a standalone file to produce a sysbus device which is a GIC
 *  that can be used on the realview board and as one of the builtin
 *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
 *  (2) by being directly #included into armv7m_nvic.c to produce the
 *  armv7m_nvic device
 */
#include "hw/sysbus.h"
#include "gic_internal.h"
#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif
static const uint8_t gic_id[] = {
    0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

#define NUM_CPU(s) ((s)->num_cpu)
static inline int gic_get_current_cpu(GICState *s)
        return current_cpu->cpu_index;
/* TODO: Many places that call this routine could be optimized.  */
/* Update interrupt status after enabled or pending bits have been changed.  */
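/* In outline: for each CPU we scan every interrupt, pick the enabled,
 * pending one with the lowest (i.e. most urgent) priority value, and raise
 * that CPU's parent IRQ line only if it beats both the CPU's priority mask
 * and the priority of the interrupt the CPU is already running.
 */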
void gic_update(GICState *s)
    for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
        s->current_pending[cpu] = 1023;
        if (!s->enabled || !s->cpu_enabled[cpu]) {
            qemu_irq_lower(s->parent_irq[cpu]);
        for (irq = 0; irq < s->num_irq; irq++) {
            if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm)) {
                if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
                    best_prio = GIC_GET_PRIORITY(irq, cpu);
        if (best_prio < s->priority_mask[cpu]) {
            s->current_pending[cpu] = best_irq;
            if (best_prio < s->running_priority[cpu]) {
                DPRINTF("Raised pending IRQ %d (cpu %d)\n", best_irq, cpu);
        qemu_set_irq(s->parent_irq[cpu], level);
void gic_set_pending_private(GICState *s, int cpu, int irq)
    if (gic_test_pending(s, irq, cm)) {
    DPRINTF("Set %d pending cpu %d\n", irq, cpu);
    GIC_SET_PENDING(irq, cm);
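/* Latch a change on an interrupt input line, 11MPCore/NVIC style: on a
 * rising level the interrupt is marked pending if it is edge-triggered
 * or currently enabled.
 */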
static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
        GIC_SET_LEVEL(irq, cm);
        if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
            DPRINTF("Set %d pending mask %x\n", irq, target);
            GIC_SET_PENDING(irq, target);
        GIC_CLEAR_LEVEL(irq, cm);
static void gic_set_irq_generic(GICState *s, int irq, int level,
        GIC_SET_LEVEL(irq, cm);
        DPRINTF("Set %d pending mask %x\n", irq, target);
        if (GIC_TEST_EDGE_TRIGGER(irq)) {
            GIC_SET_PENDING(irq, target);
        GIC_CLEAR_LEVEL(irq, cm);
/* Process a change in an external IRQ input.  */
static void gic_set_irq(void *opaque, int irq, int level)
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     */
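    /* For example (illustrative, assuming num_irq == 96, so N == 64):
     * GPIO input 10 is external interrupt 10, i.e. GIC interrupt ID 42,
     * GPIO input 93 is PPI 29 for CPU 0 and GPIO input 125 is PPI 29
     * for CPU 1.
     */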
    GICState *s = (GICState *)opaque;
    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* The first external input line is internal interrupt 32.  */
        target = GIC_TARGET(irq);
        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        assert(irq >= GIC_NR_SGIS);
    if (level == GIC_TEST_LEVEL(irq, cm)) {
    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        gic_set_irq_11mpcore(s, irq, level, cm, target);
        gic_set_irq_generic(s, irq, level, cm, target);
static void gic_set_running_irq(GICState *s, int cpu, int irq)
    s->running_irq[cpu] = irq;
        s->running_priority[cpu] = 0x100;
        s->running_priority[cpu] = GIC_GET_PRIORITY(irq, cpu);
uint32_t gic_acknowledge_irq(GICState *s, int cpu)
    irq = s->current_pending[cpu];
            || GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK no pending IRQ\n");
    s->last_active[irq][cpu] = s->running_irq[cpu];
    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Clear pending flags for both level and edge triggered interrupts.
         * Level triggered IRQs will be reasserted once they become inactive.
         */
        GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
        if (irq < GIC_NR_SGIS) {
            /* Lookup the source CPU for the SGI and clear this in the
             * sgi_pending map.  Return the src and clear the overall pending
             * state on this CPU if the SGI is not pending from any CPUs.
             */
            assert(s->sgi_pending[irq][cpu] != 0);
            src = ctz32(s->sgi_pending[irq][cpu]);
            s->sgi_pending[irq][cpu] &= ~(1 << src);
            if (s->sgi_pending[irq][cpu] == 0) {
                GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
            }
            ret = irq | ((src & 0x7) << 10);
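            /* e.g. SGI 3 requested by CPU 2 is acknowledged as
             * 0x803 (3 | (2 << 10)).
             */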
            /* Clear pending state for both level and edge triggered
             * interrupts. (level triggered interrupts with an active line
             * remain pending, see gic_test_pending)
             */
            GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
    gic_set_running_irq(s, cpu, irq);
    DPRINTF("ACK %d\n", irq);
void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val)
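    /* Note: priorities for the banked per-CPU interrupts (ID < GIC_INTERNAL)
     * are kept per CPU in priority1[][]; shared interrupts use priority2[].
     */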
    if (irq < GIC_INTERNAL) {
        s->priority1[irq][cpu] = val;
        s->priority2[(irq) - GIC_INTERNAL] = val;
void gic_complete_irq(GICState *s, int cpu, int irq)
    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */
    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        /* Mark level triggered interrupts as pending if they are still
           raised.  */
        if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
            DPRINTF("Set %d pending mask %x\n", irq, cm);
            GIC_SET_PENDING(irq, cm);
    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running.  */
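        /* running_irq[cpu] plus the last_active[][cpu] entries form a
         * per-CPU stack of nested interrupts chained by interrupt number;
         * walk the chain and unlink this IRQ from it.
         */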
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
            tmp = s->last_active[tmp][cpu];
        /* Complete the current running IRQ.  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
static uint32_t gic_dist_readb(void *opaque, hwaddr offset)
    GICState *s = (GICState *)opaque;
    cpu = gic_get_current_cpu(s);
    if (offset < 0x100) {
            return ((s->num_irq / 32) - 1) | ((NUM_CPU(s) - 1) << 5);
        if (offset >= 0x80) {
            /* Interrupt Security, RAZ/WI */
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  */
            irq = (offset - 0x100) * 8;
            irq = (offset - 0x180) * 8;
        if (irq >= s->num_irq)
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
            irq = (offset - 0x200) * 8;
            irq = (offset - 0x280) * 8;
        if (irq >= s->num_irq)
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (gic_test_pending(s, irq + i, mask)) {
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
        res = GIC_GET_PRIORITY(irq, cpu);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
            /* For uniprocessor GICs these RAZ/WI */
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
            if (irq >= 29 && irq <= 31) {
                res = GIC_TARGET(irq);
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 2 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_EDGE_TRIGGER(irq + i))
                res |= (2 << (i * 2));
    } else if (offset < 0xf10) {
    } else if (offset < 0xf30) {
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        if (offset < 0xf20) {
            /* GICD_CPENDSGIRn */
            irq = (offset - 0xf10);
            irq = (offset - 0xf20);
            /* GICD_SPENDSGIRn */
        res = s->sgi_pending[irq][cpu];
    } else if (offset < 0xfe0) {
    } else /* offset >= 0xfe0 */ {
        res = gic_id[(offset - 0xfe0) >> 2];
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_readb: Bad offset %x\n", (int)offset);
static uint32_t gic_dist_readw(void *opaque, hwaddr offset)
    val = gic_dist_readb(opaque, offset);
    val |= gic_dist_readb(opaque, offset + 1) << 8;
static uint32_t gic_dist_readl(void *opaque, hwaddr offset)
    val = gic_dist_readw(opaque, offset);
    val |= gic_dist_readw(opaque, offset + 2) << 16;
static void gic_dist_writeb(void *opaque, hwaddr offset,
    GICState *s = (GICState *)opaque;
    cpu = gic_get_current_cpu(s);
    if (offset < 0x100) {
            s->enabled = (value & 1);
            DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis");
        } else if (offset < 4) {
        } else if (offset >= 0x80) {
            /* Interrupt Security Registers, RAZ/WI */
    } else if (offset < 0x180) {
        /* Interrupt Set Enable.  */
        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
        if (irq < GIC_NR_SGIS) {
        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                    (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i);
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
                if (!GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Enabled IRQ %d\n", irq + i);
                GIC_SET_ENABLED(irq + i, cm);
                /* If a raised level triggered IRQ enabled then mark
                   it as pending.  */
                if (GIC_TEST_LEVEL(irq + i, mask)
                    && !GIC_TEST_EDGE_TRIGGER(irq + i)) {
                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
                    GIC_SET_PENDING(irq + i, mask);
    } else if (offset < 0x200) {
        /* Interrupt Clear Enable.  */
        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
        if (irq < GIC_NR_SGIS) {
        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
                if (GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Disabled IRQ %d\n", irq + i);
                GIC_CLEAR_ENABLED(irq + i, cm);
    } else if (offset < 0x280) {
        /* Interrupt Set Pending.  */
        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
        if (irq < GIC_NR_SGIS) {
        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
    } else if (offset < 0x300) {
        /* Interrupt Clear Pending.  */
        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
        if (irq < GIC_NR_SGIS) {
        for (i = 0; i < 8; i++) {
            /* ??? This currently clears the pending bit for all CPUs, even
               for per-CPU interrupts.  It's unclear whether this is the
               correct behaviour.  */
            if (value & (1 << i)) {
                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
        gic_set_priority(s, cpu, irq, value);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
         * annoying exception of the 11MPCore's GIC.
         */
        if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
            } else if (irq < GIC_INTERNAL) {
                value = ALL_CPU_MASK;
            s->irq_target[irq] = value & ALL_CPU_MASK;
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
        if (irq < GIC_INTERNAL)
        for (i = 0; i < 4; i++) {
            if (value & (1 << (i * 2))) {
                GIC_SET_MODEL(irq + i);
                GIC_CLEAR_MODEL(irq + i);
            if (value & (2 << (i * 2))) {
                GIC_SET_EDGE_TRIGGER(irq + i);
                GIC_CLEAR_EDGE_TRIGGER(irq + i);
    } else if (offset < 0xf10) {
        /* 0xf00 is only handled for 32-bit writes.  */
    } else if (offset < 0xf20) {
        /* GICD_CPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        irq = (offset - 0xf10);
        s->sgi_pending[irq][cpu] &= ~value;
        if (s->sgi_pending[irq][cpu] == 0) {
            GIC_CLEAR_PENDING(irq, 1 << cpu);
    } else if (offset < 0xf30) {
        /* GICD_SPENDSGIRn */
        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
        irq = (offset - 0xf20);
        GIC_SET_PENDING(irq, 1 << cpu);
        s->sgi_pending[irq][cpu] |= value;
    qemu_log_mask(LOG_GUEST_ERROR,
                  "gic_dist_writeb: Bad offset %x\n", (int)offset);
static void gic_dist_writew(void *opaque, hwaddr offset,
    gic_dist_writeb(opaque, offset, value & 0xff);
    gic_dist_writeb(opaque, offset + 1, value >> 8);
static void gic_dist_writel(void *opaque, hwaddr offset,
    GICState *s = (GICState *)opaque;
    if (offset == 0xf00) {
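        /* GICD_SGIR write: in outline, bits [25:24] are the target list
         * filter, bits [23:16] the CPU target list, and the low bits give
         * the SGI number.
         */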
        cpu = gic_get_current_cpu(s);
        switch ((value >> 24) & 3) {
            mask = (value >> 16) & ALL_CPU_MASK;
            mask = ALL_CPU_MASK ^ (1 << cpu);
            DPRINTF("Bad Soft Int target filter\n");
        GIC_SET_PENDING(irq, mask);
        target_cpu = ctz32(mask);
        while (target_cpu < GIC_NCPU) {
            s->sgi_pending[irq][target_cpu] |= (1 << cpu);
            mask &= ~(1 << target_cpu);
            target_cpu = ctz32(mask);
    gic_dist_writew(opaque, offset, value & 0xffff);
    gic_dist_writew(opaque, offset + 2, value >> 16);
static const MemoryRegionOps gic_dist_ops = {
        .read = { gic_dist_readb, gic_dist_readw, gic_dist_readl, },
        .write = { gic_dist_writeb, gic_dist_writew, gic_dist_writel, },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint32_t gic_cpu_read(GICState *s, int cpu, int offset)
    case 0x00: /* Control */
        return s->cpu_enabled[cpu];
    case 0x04: /* Priority mask */
        return s->priority_mask[cpu];
    case 0x08: /* Binary Point */
    case 0x0c: /* Acknowledge */
        return gic_acknowledge_irq(s, cpu);
    case 0x14: /* Running Priority */
        return s->running_priority[cpu];
    case 0x18: /* Highest Pending Interrupt */
        return s->current_pending[cpu];
    case 0x1c: /* Aliased Binary Point */
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        return s->apr[(offset - 0xd0) / 4][cpu];
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_read: Bad offset %x\n", (int)offset);
static void gic_cpu_write(GICState *s, int cpu, int offset, uint32_t value)
    case 0x00: /* Control */
        s->cpu_enabled[cpu] = (value & 1);
        DPRINTF("CPU %d %sabled\n", cpu, s->cpu_enabled[cpu] ? "En" : "Dis");
    case 0x04: /* Priority mask */
        s->priority_mask[cpu] = (value & 0xff);
    case 0x08: /* Binary Point */
        s->bpr[cpu] = (value & 0x7);
    case 0x10: /* End Of Interrupt */
        return gic_complete_irq(s, cpu, value & 0x3ff);
    case 0x1c: /* Aliased Binary Point */
        if (s->revision >= 2) {
            s->abpr[cpu] = (value & 0x7);
    case 0xd0: case 0xd4: case 0xd8: case 0xdc:
        qemu_log_mask(LOG_UNIMP, "Writing APR not implemented\n");
        qemu_log_mask(LOG_GUEST_ERROR,
                      "gic_cpu_write: Bad offset %x\n", (int)offset);
/* Wrappers to read/write the GIC CPU interface for the current CPU */
static uint64_t gic_thiscpu_read(void *opaque, hwaddr addr,
    GICState *s = (GICState *)opaque;
    return gic_cpu_read(s, gic_get_current_cpu(s), addr);
static void gic_thiscpu_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
    GICState *s = (GICState *)opaque;
    gic_cpu_write(s, gic_get_current_cpu(s), addr, value);
/* Wrappers to read/write the GIC CPU interface for a specific CPU.
 * These just decode the opaque pointer into GICState* + cpu id.
 */
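/* (The opaque pointer is &s->backref[cpu], so subtracting s->backref from it
 * recovers the CPU index without any extra lookup table.)
 */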
static uint64_t gic_do_cpu_read(void *opaque, hwaddr addr,
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_read(s, id, addr);
static void gic_do_cpu_write(void *opaque, hwaddr addr,
                             uint64_t value, unsigned size)
    GICState **backref = (GICState **)opaque;
    GICState *s = *backref;
    int id = (backref - s->backref);
    gic_cpu_write(s, id, addr, value);
static const MemoryRegionOps gic_thiscpu_ops = {
    .read = gic_thiscpu_read,
    .write = gic_thiscpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gic_cpu_ops = {
    .read = gic_do_cpu_read,
    .write = gic_do_cpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
void gic_init_irqs_and_distributor(GICState *s, int num_irq)
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    i = s->num_irq - GIC_INTERNAL;
    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     */
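    /* (Illustratively, with num_irq == 96 and two CPUs this works out to
     * 64 external inputs plus 2 * 32 PPI inputs, i.e. 128 GPIO lines.)
     */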
    if (s->revision != REV_NVIC) {
        i += (GIC_INTERNAL * s->num_cpu);
    qdev_init_gpio_in(DEVICE(s), gic_set_irq, i);
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(sbd, &s->parent_irq[i]);
    memory_region_init_io(&s->iomem, OBJECT(s), &gic_dist_ops, s,
static void arm_gic_realize(DeviceState *dev, Error **errp)
    /* Device instance realize function for the GIC sysbus device */
    GICState *s = ARM_GIC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    ARMGICClass *agc = ARM_GIC_GET_CLASS(s);
    Error *local_err = NULL;
    agc->parent_realize(dev, &local_err);
        error_propagate(errp, local_err);
    gic_init_irqs_and_distributor(s, s->num_irq);
    /* Memory regions for the CPU interfaces (NVIC doesn't have these):
     * a region for "CPU interface for this core", then a region for
     * "CPU interface for core 0", "for core 1", ...
     * NB that the memory region size of 0x100 applies for the 11MPCore
     * and also cores following the GIC v1 spec (ie A9).
     * GIC v2 defines a larger memory region (0x1000) so this will need
     * to be extended when we implement A15.
     */
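    /* (So, illustratively, a 4-CPU GICv1 ends up with the distributor region
     * plus five 0x100 CPU-interface regions: one "current CPU" alias and one
     * per core.)
     */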
    memory_region_init_io(&s->cpuiomem[0], OBJECT(s), &gic_thiscpu_ops, s,
    for (i = 0; i < NUM_CPU(s); i++) {
        memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops,
                              &s->backref[i], "gic_cpu", 0x100);
    sysbus_init_mmio(sbd, &s->iomem);
    /* cpu interfaces (one for "current cpu" plus one per cpu) */
    for (i = 0; i <= NUM_CPU(s); i++) {
        sysbus_init_mmio(sbd, &s->cpuiomem[i]);
static void arm_gic_class_init(ObjectClass *klass, void *data)
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICClass *agc = ARM_GIC_CLASS(klass);
    agc->parent_realize = dc->realize;
    dc->realize = arm_gic_realize;
static const TypeInfo arm_gic_info = {
    .name = TYPE_ARM_GIC,
    .parent = TYPE_ARM_GIC_COMMON,
    .instance_size = sizeof(GICState),
    .class_init = arm_gic_class_init,
    .class_size = sizeof(ARMGICClass),
};
static void arm_gic_register_types(void)
    type_register_static(&arm_gic_info);

type_init(arm_gic_register_types)