hw/arm_gic.c
/*
 * ARM Generic/Distributed Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

/* This file contains implementation code for the RealView EB interrupt
   controller, MPCore distributed interrupt controller and ARMv7-M
   Nested Vectored Interrupt Controller.  */
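
/* Note: this is not a standalone compilation unit; NCPU, the optional NVIC
   define and gic_get_current_cpu() are expected to be supplied by the source
   file that #includes this one. */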

/* Maximum number of possible interrupts, determined by the GIC architecture */
#define GIC_MAXIRQ 1020
//#define DEBUG_GIC

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { printf("arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#ifdef NVIC
static const uint8_t gic_id[] =
{ 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 };
/* The NVIC has 16 internal vectors.  However these are not exposed
   through the normal GIC interface.  */
#define GIC_BASE_IRQ 32
#else
static const uint8_t gic_id[] =
{ 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 };
#define GIC_BASE_IRQ 0
#endif

#define FROM_SYSBUSGIC(type, dev) \
    DO_UPCAST(type, gic, FROM_SYSBUS(gic_state, dev))

typedef struct gic_irq_state
{
    /* The enable bits are only banked for per-cpu interrupts.  */
    unsigned enabled:NCPU;
    unsigned pending:NCPU;
    unsigned active:NCPU;
    unsigned level:NCPU;
    unsigned model:1; /* 0 = N:N, 1 = 1:N */
    unsigned trigger:1; /* nonzero = edge triggered.  */
} gic_irq_state;

#define ALL_CPU_MASK ((1 << NCPU) - 1)
#if NCPU > 1
#define NUM_CPU(s) ((s)->num_cpu)
#else
#define NUM_CPU(s) 1
#endif
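
/* The accessor macros below expand using the local gic_state pointer 's';
   'cm' is a bitmask of CPU interfaces (1 << cpu, or ALL_CPU_MASK), so one
   call can update banked per-CPU state for several CPUs at once. */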

#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm)
#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm)
#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm)
#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm)
#define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0)
#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm)
#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm)
#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
#define GIC_SET_MODEL(irq) s->irq_state[irq].model = 1
#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = 0
#define GIC_TEST_MODEL(irq) s->irq_state[irq].model
#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm)
#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
#define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = 1
#define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = 0
#define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger
#define GIC_GET_PRIORITY(irq, cpu) \
  (((irq) < 32) ? s->priority1[irq][cpu] : s->priority2[(irq) - 32])
#ifdef NVIC
#define GIC_TARGET(irq) 1
#else
#define GIC_TARGET(irq) s->irq_target[irq]
#endif

typedef struct gic_state
{
    SysBusDevice busdev;
    qemu_irq parent_irq[NCPU];
    int enabled;
    int cpu_enabled[NCPU];

    gic_irq_state irq_state[GIC_MAXIRQ];
#ifndef NVIC
    int irq_target[GIC_MAXIRQ];
#endif
    int priority1[32][NCPU];
    int priority2[GIC_MAXIRQ - 32];
    int last_active[GIC_MAXIRQ][NCPU];

    int priority_mask[NCPU];
    int running_irq[NCPU];
    int running_priority[NCPU];
    int current_pending[NCPU];

#if NCPU > 1
    int num_cpu;
#endif

    MemoryRegion iomem; /* Distributor */
#ifndef NVIC
    /* This is just so we can have an opaque pointer which identifies
     * both this GIC and which CPU interface we should be accessing.
     */
    struct gic_state *backref[NCPU];
    MemoryRegion cpuiomem[NCPU+1]; /* CPU interfaces */
#endif
    uint32_t num_irq;
} gic_state;
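
/* Interrupt numbering: IDs 0..31 are per-CPU (their enable/pending/active
   state and priority are banked per CPU interface), while IDs 32 and above
   are shared and are driven by the GPIO input lines (see gic_set_irq). */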

/* TODO: Many places that call this routine could be optimized.  */
/* Update interrupt status after enabled or pending bits have been changed.  */
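/* Note: 1023 is the architected "no pending interrupt"/spurious ID, and
   0x100 is numerically larger than any valid 8-bit priority (larger value =
   lower priority), so it loses to any real pending interrupt. */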
static void gic_update(gic_state *s)
{
    int best_irq;
    int best_prio;
    int irq;
    int level;
    int cpu;
    int cm;

    for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
        cm = 1 << cpu;
        s->current_pending[cpu] = 1023;
        if (!s->enabled || !s->cpu_enabled[cpu]) {
            qemu_irq_lower(s->parent_irq[cpu]);
            return;
        }
        best_prio = 0x100;
        best_irq = 1023;
        for (irq = 0; irq < s->num_irq; irq++) {
            if (GIC_TEST_ENABLED(irq, cm) && GIC_TEST_PENDING(irq, cm)) {
                if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
                    best_prio = GIC_GET_PRIORITY(irq, cpu);
                    best_irq = irq;
                }
            }
        }
        level = 0;
        if (best_prio <= s->priority_mask[cpu]) {
            s->current_pending[cpu] = best_irq;
            if (best_prio < s->running_priority[cpu]) {
                DPRINTF("Raised pending IRQ %d\n", best_irq);
                level = 1;
            }
        }
        qemu_set_irq(s->parent_irq[cpu], level);
    }
}

static void __attribute__((unused))
gic_set_pending_private(gic_state *s, int cpu, int irq)
{
    int cm = 1 << cpu;

    if (GIC_TEST_PENDING(irq, cm))
        return;

    DPRINTF("Set %d pending cpu %d\n", irq, cpu);
    GIC_SET_PENDING(irq, cm);
    gic_update(s);
}

/* Process a change in an external IRQ input.  */
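/* Pending state is latched here for edge-triggered interrupts regardless of
   their enable state; level-triggered interrupts only become pending while
   enabled, and are re-evaluated on enable and on EOI (gic_complete_irq). */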
static void gic_set_irq(void *opaque, int irq, int level)
{
    gic_state *s = (gic_state *)opaque;
    /* The first external input line is internal interrupt 32.  */
    irq += 32;
    if (level == GIC_TEST_LEVEL(irq, ALL_CPU_MASK))
        return;

    if (level) {
        GIC_SET_LEVEL(irq, ALL_CPU_MASK);
        if (GIC_TEST_TRIGGER(irq) || GIC_TEST_ENABLED(irq, ALL_CPU_MASK)) {
            DPRINTF("Set %d pending mask %x\n", irq, GIC_TARGET(irq));
            GIC_SET_PENDING(irq, GIC_TARGET(irq));
        }
    } else {
        GIC_CLEAR_LEVEL(irq, ALL_CPU_MASK);
    }
    gic_update(s);
}

static void gic_set_running_irq(gic_state *s, int cpu, int irq)
{
    s->running_irq[cpu] = irq;
    if (irq == 1023) {
        s->running_priority[cpu] = 0x100;
    } else {
        s->running_priority[cpu] = GIC_GET_PRIORITY(irq, cpu);
    }
    gic_update(s);
}

static uint32_t gic_acknowledge_irq(gic_state *s, int cpu)
{
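    /* last_active[] forms a per-CPU chain of nested interrupts: the IRQ
       being acknowledged records which IRQ was running before it, so that
       gic_complete_irq can restore the previous one on EOI. */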
    int new_irq;
    int cm = 1 << cpu;
    new_irq = s->current_pending[cpu];
    if (new_irq == 1023
            || GIC_GET_PRIORITY(new_irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK no pending IRQ\n");
        return 1023;
    }
    s->last_active[new_irq][cpu] = s->running_irq[cpu];
    /* Clear pending flags for both level and edge triggered interrupts.
       Level triggered IRQs will be reasserted once they become inactive.  */
    GIC_CLEAR_PENDING(new_irq, GIC_TEST_MODEL(new_irq) ? ALL_CPU_MASK : cm);
    gic_set_running_irq(s, cpu, new_irq);
    DPRINTF("ACK %d\n", new_irq);
    return new_irq;
}

static void gic_complete_irq(gic_state * s, int cpu, int irq)
{
    int update = 0;
    int cm = 1 << cpu;
    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */
    /* Mark level triggered interrupts as pending if they are still
       raised.  */
    if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
        DPRINTF("Set %d pending mask %x\n", irq, cm);
        GIC_SET_PENDING(irq, cm);
        update = 1;
    }
    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running.  */
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
                break;
            }
            tmp = s->last_active[tmp][cpu];
        }
        if (update) {
            gic_update(s);
        }
    } else {
        /* Complete the current running IRQ.  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
    }
}
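
/* The distributor register block below is implemented byte-at-a-time; the
   16- and 32-bit accessors are composed from the byte accessors. */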

static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset)
{
    gic_state *s = (gic_state *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu();
    cm = 1 << cpu;
    if (offset < 0x100) {
#ifndef NVIC
        if (offset == 0)
            return s->enabled;
        if (offset == 4)
            return ((s->num_irq / 32) - 1) | ((NUM_CPU(s) - 1) << 5);
        if (offset < 0x08)
            return 0;
        if (offset >= 0x80) {
            /* Interrupt Security, RAZ/WI */
            return 0;
        }
#endif
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < 32) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_PENDING(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < 32) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = GIC_GET_PRIORITY(irq, cpu);
#ifndef NVIC
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        irq = (offset - 0x800) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq >= 29 && irq <= 31) {
            res = cm;
        } else {
            res = GIC_TARGET(irq);
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 2 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
#endif
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    hw_error("gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}

static uint32_t gic_dist_readw(void *opaque, target_phys_addr_t offset)
{
    uint32_t val;
    val = gic_dist_readb(opaque, offset);
    val |= gic_dist_readb(opaque, offset + 1) << 8;
    return val;
}

static uint32_t gic_dist_readl(void *opaque, target_phys_addr_t offset)
{
    uint32_t val;
#ifdef NVIC
    gic_state *s = (gic_state *)opaque;
    uint32_t addr;
    addr = offset;
    if (addr < 0x100 || addr > 0xd00)
        return nvic_readl(s, addr);
#endif
    val = gic_dist_readw(opaque, offset);
    val |= gic_dist_readw(opaque, offset + 2) << 16;
    return val;
}
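
/* When built as the NVIC, nvic_readl()/nvic_writel() (provided by the
   including file) handle the system control registers outside the GIC-like
   0x100..0xd00 window; everything else falls through to the code here. */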

static void gic_dist_writeb(void *opaque, target_phys_addr_t offset,
                            uint32_t value)
{
    gic_state *s = (gic_state *)opaque;
    int irq;
    int i;
    int cpu;

    cpu = gic_get_current_cpu();
    if (offset < 0x100) {
#ifdef NVIC
        goto bad_reg;
#else
        if (offset == 0) {
            s->enabled = (value & 1);
            DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis");
        } else if (offset < 4) {
            /* ignored.  */
        } else if (offset >= 0x80) {
            /* Interrupt Security Registers, RAZ/WI */
        } else {
            goto bad_reg;
        }
#endif
    } else if (offset < 0x180) {
        /* Interrupt Set Enable.  */
        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 16)
            value = 0xff;
        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int mask = (irq < 32) ? (1 << cpu) : GIC_TARGET(irq);
                int cm = (irq < 32) ? (1 << cpu) : ALL_CPU_MASK;

                if (!GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Enabled IRQ %d\n", irq + i);
                }
                GIC_SET_ENABLED(irq + i, cm);
                /* If a raised level triggered IRQ is enabled then mark
                   it as pending.  */
                if (GIC_TEST_LEVEL(irq + i, mask)
                        && !GIC_TEST_TRIGGER(irq + i)) {
                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
                    GIC_SET_PENDING(irq + i, mask);
                }
            }
        }
    } else if (offset < 0x200) {
        /* Interrupt Clear Enable.  */
        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 16)
            value = 0;
        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int cm = (irq < 32) ? (1 << cpu) : ALL_CPU_MASK;

                if (GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Disabled IRQ %d\n", irq + i);
                }
                GIC_CLEAR_ENABLED(irq + i, cm);
            }
        }
    } else if (offset < 0x280) {
        /* Interrupt Set Pending.  */
        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 16)
            irq = 0;

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                GIC_SET_PENDING(irq + i, GIC_TARGET(irq));
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Clear Pending.  */
        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        for (i = 0; i < 8; i++) {
            /* ??? This currently clears the pending bit for all CPUs, even
               for per-CPU interrupts.  It's unclear whether this is the
               correct behavior.  */
            if (value & (1 << i)) {
                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        goto bad_reg;
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 32) {
            s->priority1[irq][cpu] = value;
        } else {
            s->priority2[irq - 32] = value;
        }
#ifndef NVIC
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        irq = (offset - 0x800) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 29)
            value = 0;
        else if (irq < 32)
            value = ALL_CPU_MASK;
        s->irq_target[irq] = value & ALL_CPU_MASK;
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 32)
            value |= 0xaa;
        for (i = 0; i < 4; i++) {
            if (value & (1 << (i * 2))) {
                GIC_SET_MODEL(irq + i);
            } else {
                GIC_CLEAR_MODEL(irq + i);
            }
            if (value & (2 << (i * 2))) {
                GIC_SET_TRIGGER(irq + i);
            } else {
                GIC_CLEAR_TRIGGER(irq + i);
            }
        }
#endif
    } else {
        /* 0xf00 is only handled for 32-bit writes.  */
        goto bad_reg;
    }
    gic_update(s);
    return;
bad_reg:
    hw_error("gic_dist_writeb: Bad offset %x\n", (int)offset);
}

static void gic_dist_writew(void *opaque, target_phys_addr_t offset,
                            uint32_t value)
{
    gic_dist_writeb(opaque, offset, value & 0xff);
    gic_dist_writeb(opaque, offset + 1, value >> 8);
}

static void gic_dist_writel(void *opaque, target_phys_addr_t offset,
                            uint32_t value)
{
    gic_state *s = (gic_state *)opaque;
#ifdef NVIC
    uint32_t addr;
    addr = offset;
    if (addr < 0x100 || (addr > 0xd00 && addr != 0xf00)) {
        nvic_writel(s, addr, value);
        return;
    }
#endif
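    /* Offset 0xf00 is the Software Generated Interrupt register: bits
       [25:24] select the target filter (explicit list / all-but-self /
       self), bits [23:16] are the CPU target list and the low bits give
       the interrupt number to raise. */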
    if (offset == 0xf00) {
        int cpu;
        int irq;
        int mask;

        cpu = gic_get_current_cpu();
        irq = value & 0x3ff;
        switch ((value >> 24) & 3) {
        case 0:
            mask = (value >> 16) & ALL_CPU_MASK;
            break;
        case 1:
            mask = ALL_CPU_MASK ^ (1 << cpu);
            break;
        case 2:
            mask = 1 << cpu;
            break;
        default:
            DPRINTF("Bad Soft Int target filter\n");
            mask = ALL_CPU_MASK;
            break;
        }
        GIC_SET_PENDING(irq, mask);
        gic_update(s);
        return;
    }
    gic_dist_writew(opaque, offset, value & 0xffff);
    gic_dist_writew(opaque, offset + 2, value >> 16);
}

static const MemoryRegionOps gic_dist_ops = {
    .old_mmio = {
        .read = { gic_dist_readb, gic_dist_readw, gic_dist_readl, },
        .write = { gic_dist_writeb, gic_dist_writew, gic_dist_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

#ifndef NVIC
static uint32_t gic_cpu_read(gic_state *s, int cpu, int offset)
{
    switch (offset) {
    case 0x00: /* Control */
        return s->cpu_enabled[cpu];
    case 0x04: /* Priority mask */
        return s->priority_mask[cpu];
    case 0x08: /* Binary Point */
        /* ??? Not implemented.  */
        return 0;
    case 0x0c: /* Acknowledge */
        return gic_acknowledge_irq(s, cpu);
    case 0x14: /* Running Priority */
        return s->running_priority[cpu];
    case 0x18: /* Highest Pending Interrupt */
        return s->current_pending[cpu];
    default:
        hw_error("gic_cpu_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}

static void gic_cpu_write(gic_state *s, int cpu, int offset, uint32_t value)
{
    switch (offset) {
    case 0x00: /* Control */
        s->cpu_enabled[cpu] = (value & 1);
        DPRINTF("CPU %d %sabled\n", cpu, s->cpu_enabled[cpu] ? "En" : "Dis");
        break;
    case 0x04: /* Priority mask */
        s->priority_mask[cpu] = (value & 0xff);
        break;
    case 0x08: /* Binary Point */
        /* ??? Not implemented.  */
        break;
    case 0x10: /* End Of Interrupt */
        return gic_complete_irq(s, cpu, value & 0x3ff);
    default:
        hw_error("gic_cpu_write: Bad offset %x\n", (int)offset);
        return;
    }
    gic_update(s);
}

/* Wrappers to read/write the GIC CPU interface for the current CPU */
static uint64_t gic_thiscpu_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    gic_state *s = (gic_state *)opaque;
    return gic_cpu_read(s, gic_get_current_cpu(), addr & 0xff);
}

static void gic_thiscpu_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    gic_state *s = (gic_state *)opaque;
    gic_cpu_write(s, gic_get_current_cpu(), addr & 0xff, value);
}

/* Wrappers to read/write the GIC CPU interface for a specific CPU.
 * These just decode the opaque pointer into gic_state* + cpu id.
 */
static uint64_t gic_do_cpu_read(void *opaque, target_phys_addr_t addr,
                                unsigned size)
{
    gic_state **backref = (gic_state **)opaque;
    gic_state *s = *backref;
    int id = (backref - s->backref);
    return gic_cpu_read(s, id, addr & 0xff);
}

static void gic_do_cpu_write(void *opaque, target_phys_addr_t addr,
                             uint64_t value, unsigned size)
{
    gic_state **backref = (gic_state **)opaque;
    gic_state *s = *backref;
    int id = (backref - s->backref);
    gic_cpu_write(s, id, addr & 0xff, value);
}

static const MemoryRegionOps gic_thiscpu_ops = {
    .read = gic_thiscpu_read,
    .write = gic_thiscpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps gic_cpu_ops = {
    .read = gic_do_cpu_read,
    .write = gic_do_cpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
#endif

static void gic_reset(gic_state *s)
{
    int i;
    memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state));
    for (i = 0 ; i < NUM_CPU(s); i++) {
        s->priority_mask[i] = 0xf0;
        s->current_pending[i] = 1023;
        s->running_irq[i] = 1023;
        s->running_priority[i] = 0x100;
#ifdef NVIC
        /* The NVIC doesn't have per-cpu interfaces, so enable by default.  */
        s->cpu_enabled[i] = 1;
#else
        s->cpu_enabled[i] = 0;
#endif
    }
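    /* The first 16 interrupts are always enabled and edge-triggered (for
       the GIC these are the software-generated interrupts). */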
    for (i = 0; i < 16; i++) {
        GIC_SET_ENABLED(i, ALL_CPU_MASK);
        GIC_SET_TRIGGER(i);
    }
#ifdef NVIC
    /* The NVIC is always enabled.  */
    s->enabled = 1;
#else
    s->enabled = 0;
#endif
}
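
/* Migration state is streamed field by field in a fixed order with
   qemu_put_be32()/qemu_put_byte(); gic_load() accepts only version 2. */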

static void gic_save(QEMUFile *f, void *opaque)
{
    gic_state *s = (gic_state *)opaque;
    int i;
    int j;

    qemu_put_be32(f, s->enabled);
    for (i = 0; i < NUM_CPU(s); i++) {
        qemu_put_be32(f, s->cpu_enabled[i]);
        for (j = 0; j < 32; j++)
            qemu_put_be32(f, s->priority1[j][i]);
        for (j = 0; j < s->num_irq; j++)
            qemu_put_be32(f, s->last_active[j][i]);
        qemu_put_be32(f, s->priority_mask[i]);
        qemu_put_be32(f, s->running_irq[i]);
        qemu_put_be32(f, s->running_priority[i]);
        qemu_put_be32(f, s->current_pending[i]);
    }
    for (i = 0; i < s->num_irq - 32; i++) {
        qemu_put_be32(f, s->priority2[i]);
    }
    for (i = 0; i < s->num_irq; i++) {
#ifndef NVIC
        qemu_put_be32(f, s->irq_target[i]);
#endif
        qemu_put_byte(f, s->irq_state[i].enabled);
        qemu_put_byte(f, s->irq_state[i].pending);
        qemu_put_byte(f, s->irq_state[i].active);
        qemu_put_byte(f, s->irq_state[i].level);
        qemu_put_byte(f, s->irq_state[i].model);
        qemu_put_byte(f, s->irq_state[i].trigger);
    }
}

static int gic_load(QEMUFile *f, void *opaque, int version_id)
{
    gic_state *s = (gic_state *)opaque;
    int i;
    int j;

    if (version_id != 2)
        return -EINVAL;

    s->enabled = qemu_get_be32(f);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->cpu_enabled[i] = qemu_get_be32(f);
        for (j = 0; j < 32; j++)
            s->priority1[j][i] = qemu_get_be32(f);
        for (j = 0; j < s->num_irq; j++)
            s->last_active[j][i] = qemu_get_be32(f);
        s->priority_mask[i] = qemu_get_be32(f);
        s->running_irq[i] = qemu_get_be32(f);
        s->running_priority[i] = qemu_get_be32(f);
        s->current_pending[i] = qemu_get_be32(f);
    }
    for (i = 0; i < s->num_irq - 32; i++) {
        s->priority2[i] = qemu_get_be32(f);
    }
    for (i = 0; i < s->num_irq; i++) {
#ifndef NVIC
        s->irq_target[i] = qemu_get_be32(f);
#endif
        s->irq_state[i].enabled = qemu_get_byte(f);
        s->irq_state[i].pending = qemu_get_byte(f);
        s->irq_state[i].active = qemu_get_byte(f);
        s->irq_state[i].level = qemu_get_byte(f);
        s->irq_state[i].model = qemu_get_byte(f);
        s->irq_state[i].trigger = qemu_get_byte(f);
    }

    return 0;
}

#if NCPU > 1
static void gic_init(gic_state *s, int num_cpu, int num_irq)
#else
static void gic_init(gic_state *s, int num_irq)
#endif
{
    int i;

#if NCPU > 1
    s->num_cpu = num_cpu;
#endif
    s->num_irq = num_irq + GIC_BASE_IRQ;
    if (s->num_irq > GIC_MAXIRQ) {
        hw_error("requested %u interrupt lines exceeds GIC maximum %d\n",
                 num_irq, GIC_MAXIRQ);
    }
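    /* The GPIO inputs are the shared interrupt lines, which start at
       internal interrupt number 32 (see gic_set_irq); each CPU interface
       gets one outgoing IRQ line to its core. */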
    qdev_init_gpio_in(&s->busdev.qdev, gic_set_irq, s->num_irq - 32);
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(&s->busdev, &s->parent_irq[i]);
    }
    memory_region_init_io(&s->iomem, &gic_dist_ops, s, "gic_dist", 0x1000);
#ifndef NVIC
    /* Memory regions for the CPU interfaces (NVIC doesn't have these):
     * a region for "CPU interface for this core", then a region for
     * "CPU interface for core 0", "for core 1", ...
     * NB that the memory region size of 0x100 applies for the 11MPCore
     * and also cores following the GIC v1 spec (ie A9).
     * GIC v2 defines a larger memory region (0x1000) so this will need
     * to be extended when we implement A15.
     */
    memory_region_init_io(&s->cpuiomem[0], &gic_thiscpu_ops, s,
                          "gic_cpu", 0x100);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->backref[i] = s;
        memory_region_init_io(&s->cpuiomem[i+1], &gic_cpu_ops, &s->backref[i],
                              "gic_cpu", 0x100);
    }
#endif

    gic_reset(s);
    register_savevm(NULL, "arm_gic", -1, 2, gic_save, gic_load, s);
}