target-s390x: Update s390x_{tod,cpu}_timer() to use S390CPU
[qemu/ar7.git] / hw / arm_gic.c
blob6b34c06a8fb408576a86450a5f40f4c47ab7e829
1 /*
2 * ARM Generic/Distributed Interrupt Controller
4 * Copyright (c) 2006-2007 CodeSourcery.
5 * Written by Paul Brook
7 * This code is licensed under the GPL.
8 */
10 /* This file contains implementation code for the RealView EB interrupt
11 controller, MPCore distributed interrupt controller and ARMv7-M
12 Nested Vectored Interrupt Controller. */
/* Maximum number of possible interrupts, determined by the GIC architecture */
#define GIC_MAXIRQ 1020
/* First 32 are private to each CPU (SGIs and PPIs). */
#define GIC_INTERNAL 32
/* Define DEBUG_GIC to enable DPRINTF tracing to stdout. */
//#define DEBUG_GIC

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { printf("arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif
#ifdef NVIC
/* PrimeCell-style peripheral ID bytes read back at offsets 0xfe0.. */
static const uint8_t gic_id[] =
{ 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 };
/* The NVIC has 16 internal vectors.  However these are not exposed
   through the normal GIC interface. */
#define GIC_BASE_IRQ 32
#else
static const uint8_t gic_id[] =
{ 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 };
#define GIC_BASE_IRQ 0
#endif
/* Upcast from the embedded gic_state of a SysBus-wrapped GIC device. */
#define FROM_SYSBUSGIC(type, dev) \
    DO_UPCAST(type, gic, FROM_SYSBUS(gic_state, dev))
42 typedef struct gic_irq_state
44 /* The enable bits are only banked for per-cpu interrupts. */
45 unsigned enabled:NCPU;
46 unsigned pending:NCPU;
47 unsigned active:NCPU;
48 unsigned level:NCPU;
49 unsigned model:1; /* 0 = N:N, 1 = 1:N */
50 unsigned trigger:1; /* nonzero = edge triggered. */
51 } gic_irq_state;
/* Bitmask covering every CPU interface. */
#define ALL_CPU_MASK ((1 << NCPU) - 1)
#if NCPU > 1
#define NUM_CPU(s) ((s)->num_cpu)
#else
#define NUM_CPU(s) 1
#endif
/* Accessors for per-interrupt state; all expect a gic_state *s in scope.
 * 'cm' is a CPU mask (1 << cpu, or ALL_CPU_MASK for shared interrupts). */
#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm)
#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm)
#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm)
#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm)
#define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0)
#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm)
#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm)
#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
#define GIC_SET_MODEL(irq) s->irq_state[irq].model = 1
#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = 0
#define GIC_TEST_MODEL(irq) s->irq_state[irq].model
/* NOTE: SET_LEVEL assigns rather than ORs the mask, unlike the others. */
#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm)
#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
#define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = 1
#define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = 0
#define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger
/* Priority: first GIC_INTERNAL interrupts are banked per-CPU. */
#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ?            \
                                    s->priority1[irq][cpu] :            \
                                    s->priority2[(irq) - GIC_INTERNAL])
#ifdef NVIC
#define GIC_TARGET(irq) 1
#else
#define GIC_TARGET(irq) s->irq_target[irq]
#endif
87 typedef struct gic_state
89 SysBusDevice busdev;
90 qemu_irq parent_irq[NCPU];
91 int enabled;
92 int cpu_enabled[NCPU];
94 gic_irq_state irq_state[GIC_MAXIRQ];
95 #ifndef NVIC
96 int irq_target[GIC_MAXIRQ];
97 #endif
98 int priority1[GIC_INTERNAL][NCPU];
99 int priority2[GIC_MAXIRQ - GIC_INTERNAL];
100 int last_active[GIC_MAXIRQ][NCPU];
102 int priority_mask[NCPU];
103 int running_irq[NCPU];
104 int running_priority[NCPU];
105 int current_pending[NCPU];
107 #if NCPU > 1
108 int num_cpu;
109 #endif
111 MemoryRegion iomem; /* Distributor */
112 #ifndef NVIC
113 /* This is just so we can have an opaque pointer which identifies
114 * both this GIC and which CPU interface we should be accessing.
116 struct gic_state *backref[NCPU];
117 MemoryRegion cpuiomem[NCPU+1]; /* CPU interfaces */
118 #endif
119 uint32_t num_irq;
120 } gic_state;
122 /* TODO: Many places that call this routine could be optimized. */
123 /* Update interrupt status after enabled or pending bits have been changed. */
124 static void gic_update(gic_state *s)
126 int best_irq;
127 int best_prio;
128 int irq;
129 int level;
130 int cpu;
131 int cm;
133 for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
134 cm = 1 << cpu;
135 s->current_pending[cpu] = 1023;
136 if (!s->enabled || !s->cpu_enabled[cpu]) {
137 qemu_irq_lower(s->parent_irq[cpu]);
138 return;
140 best_prio = 0x100;
141 best_irq = 1023;
142 for (irq = 0; irq < s->num_irq; irq++) {
143 if (GIC_TEST_ENABLED(irq, cm) && GIC_TEST_PENDING(irq, cm)) {
144 if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
145 best_prio = GIC_GET_PRIORITY(irq, cpu);
146 best_irq = irq;
150 level = 0;
151 if (best_prio <= s->priority_mask[cpu]) {
152 s->current_pending[cpu] = best_irq;
153 if (best_prio < s->running_priority[cpu]) {
154 DPRINTF("Raised pending IRQ %d\n", best_irq);
155 level = 1;
158 qemu_set_irq(s->parent_irq[cpu], level);
162 static void __attribute__((unused))
163 gic_set_pending_private(gic_state *s, int cpu, int irq)
165 int cm = 1 << cpu;
167 if (GIC_TEST_PENDING(irq, cm))
168 return;
170 DPRINTF("Set %d pending cpu %d\n", irq, cpu);
171 GIC_SET_PENDING(irq, cm);
172 gic_update(s);
175 /* Process a change in an external IRQ input. */
176 static void gic_set_irq(void *opaque, int irq, int level)
178 gic_state *s = (gic_state *)opaque;
179 /* The first external input line is internal interrupt 32. */
180 irq += GIC_INTERNAL;
181 if (level == GIC_TEST_LEVEL(irq, ALL_CPU_MASK))
182 return;
184 if (level) {
185 GIC_SET_LEVEL(irq, ALL_CPU_MASK);
186 if (GIC_TEST_TRIGGER(irq) || GIC_TEST_ENABLED(irq, ALL_CPU_MASK)) {
187 DPRINTF("Set %d pending mask %x\n", irq, GIC_TARGET(irq));
188 GIC_SET_PENDING(irq, GIC_TARGET(irq));
190 } else {
191 GIC_CLEAR_LEVEL(irq, ALL_CPU_MASK);
193 gic_update(s);
196 static void gic_set_running_irq(gic_state *s, int cpu, int irq)
198 s->running_irq[cpu] = irq;
199 if (irq == 1023) {
200 s->running_priority[cpu] = 0x100;
201 } else {
202 s->running_priority[cpu] = GIC_GET_PRIORITY(irq, cpu);
204 gic_update(s);
207 static uint32_t gic_acknowledge_irq(gic_state *s, int cpu)
209 int new_irq;
210 int cm = 1 << cpu;
211 new_irq = s->current_pending[cpu];
212 if (new_irq == 1023
213 || GIC_GET_PRIORITY(new_irq, cpu) >= s->running_priority[cpu]) {
214 DPRINTF("ACK no pending IRQ\n");
215 return 1023;
217 s->last_active[new_irq][cpu] = s->running_irq[cpu];
218 /* Clear pending flags for both level and edge triggered interrupts.
219 Level triggered IRQs will be reasserted once they become inactive. */
220 GIC_CLEAR_PENDING(new_irq, GIC_TEST_MODEL(new_irq) ? ALL_CPU_MASK : cm);
221 gic_set_running_irq(s, cpu, new_irq);
222 DPRINTF("ACK %d\n", new_irq);
223 return new_irq;
226 static void gic_complete_irq(gic_state * s, int cpu, int irq)
228 int update = 0;
229 int cm = 1 << cpu;
230 DPRINTF("EOI %d\n", irq);
231 if (irq >= s->num_irq) {
232 /* This handles two cases:
233 * 1. If software writes the ID of a spurious interrupt [ie 1023]
234 * to the GICC_EOIR, the GIC ignores that write.
235 * 2. If software writes the number of a non-existent interrupt
236 * this must be a subcase of "value written does not match the last
237 * valid interrupt value read from the Interrupt Acknowledge
238 * register" and so this is UNPREDICTABLE. We choose to ignore it.
240 return;
242 if (s->running_irq[cpu] == 1023)
243 return; /* No active IRQ. */
244 /* Mark level triggered interrupts as pending if they are still
245 raised. */
246 if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
247 && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
248 DPRINTF("Set %d pending mask %x\n", irq, cm);
249 GIC_SET_PENDING(irq, cm);
250 update = 1;
252 if (irq != s->running_irq[cpu]) {
253 /* Complete an IRQ that is not currently running. */
254 int tmp = s->running_irq[cpu];
255 while (s->last_active[tmp][cpu] != 1023) {
256 if (s->last_active[tmp][cpu] == irq) {
257 s->last_active[tmp][cpu] = s->last_active[irq][cpu];
258 break;
260 tmp = s->last_active[tmp][cpu];
262 if (update) {
263 gic_update(s);
265 } else {
266 /* Complete the current running IRQ. */
267 gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
271 static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset)
273 gic_state *s = (gic_state *)opaque;
274 uint32_t res;
275 int irq;
276 int i;
277 int cpu;
278 int cm;
279 int mask;
281 cpu = gic_get_current_cpu();
282 cm = 1 << cpu;
283 if (offset < 0x100) {
284 #ifndef NVIC
285 if (offset == 0)
286 return s->enabled;
287 if (offset == 4)
288 return ((s->num_irq / 32) - 1) | ((NUM_CPU(s) - 1) << 5);
289 if (offset < 0x08)
290 return 0;
291 if (offset >= 0x80) {
292 /* Interrupt Security , RAZ/WI */
293 return 0;
295 #endif
296 goto bad_reg;
297 } else if (offset < 0x200) {
298 /* Interrupt Set/Clear Enable. */
299 if (offset < 0x180)
300 irq = (offset - 0x100) * 8;
301 else
302 irq = (offset - 0x180) * 8;
303 irq += GIC_BASE_IRQ;
304 if (irq >= s->num_irq)
305 goto bad_reg;
306 res = 0;
307 for (i = 0; i < 8; i++) {
308 if (GIC_TEST_ENABLED(irq + i, cm)) {
309 res |= (1 << i);
312 } else if (offset < 0x300) {
313 /* Interrupt Set/Clear Pending. */
314 if (offset < 0x280)
315 irq = (offset - 0x200) * 8;
316 else
317 irq = (offset - 0x280) * 8;
318 irq += GIC_BASE_IRQ;
319 if (irq >= s->num_irq)
320 goto bad_reg;
321 res = 0;
322 mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
323 for (i = 0; i < 8; i++) {
324 if (GIC_TEST_PENDING(irq + i, mask)) {
325 res |= (1 << i);
328 } else if (offset < 0x400) {
329 /* Interrupt Active. */
330 irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
331 if (irq >= s->num_irq)
332 goto bad_reg;
333 res = 0;
334 mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
335 for (i = 0; i < 8; i++) {
336 if (GIC_TEST_ACTIVE(irq + i, mask)) {
337 res |= (1 << i);
340 } else if (offset < 0x800) {
341 /* Interrupt Priority. */
342 irq = (offset - 0x400) + GIC_BASE_IRQ;
343 if (irq >= s->num_irq)
344 goto bad_reg;
345 res = GIC_GET_PRIORITY(irq, cpu);
346 #ifndef NVIC
347 } else if (offset < 0xc00) {
348 /* Interrupt CPU Target. */
349 irq = (offset - 0x800) + GIC_BASE_IRQ;
350 if (irq >= s->num_irq)
351 goto bad_reg;
352 if (irq >= 29 && irq <= 31) {
353 res = cm;
354 } else {
355 res = GIC_TARGET(irq);
357 } else if (offset < 0xf00) {
358 /* Interrupt Configuration. */
359 irq = (offset - 0xc00) * 2 + GIC_BASE_IRQ;
360 if (irq >= s->num_irq)
361 goto bad_reg;
362 res = 0;
363 for (i = 0; i < 4; i++) {
364 if (GIC_TEST_MODEL(irq + i))
365 res |= (1 << (i * 2));
366 if (GIC_TEST_TRIGGER(irq + i))
367 res |= (2 << (i * 2));
369 #endif
370 } else if (offset < 0xfe0) {
371 goto bad_reg;
372 } else /* offset >= 0xfe0 */ {
373 if (offset & 3) {
374 res = 0;
375 } else {
376 res = gic_id[(offset - 0xfe0) >> 2];
379 return res;
380 bad_reg:
381 hw_error("gic_dist_readb: Bad offset %x\n", (int)offset);
382 return 0;
385 static uint32_t gic_dist_readw(void *opaque, target_phys_addr_t offset)
387 uint32_t val;
388 val = gic_dist_readb(opaque, offset);
389 val |= gic_dist_readb(opaque, offset + 1) << 8;
390 return val;
393 static uint32_t gic_dist_readl(void *opaque, target_phys_addr_t offset)
395 uint32_t val;
396 #ifdef NVIC
397 gic_state *s = (gic_state *)opaque;
398 uint32_t addr;
399 addr = offset;
400 if (addr < 0x100 || addr > 0xd00)
401 return nvic_readl(s, addr);
402 #endif
403 val = gic_dist_readw(opaque, offset);
404 val |= gic_dist_readw(opaque, offset + 2) << 16;
405 return val;
408 static void gic_dist_writeb(void *opaque, target_phys_addr_t offset,
409 uint32_t value)
411 gic_state *s = (gic_state *)opaque;
412 int irq;
413 int i;
414 int cpu;
416 cpu = gic_get_current_cpu();
417 if (offset < 0x100) {
418 #ifdef NVIC
419 goto bad_reg;
420 #else
421 if (offset == 0) {
422 s->enabled = (value & 1);
423 DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis");
424 } else if (offset < 4) {
425 /* ignored. */
426 } else if (offset >= 0x80) {
427 /* Interrupt Security Registers, RAZ/WI */
428 } else {
429 goto bad_reg;
431 #endif
432 } else if (offset < 0x180) {
433 /* Interrupt Set Enable. */
434 irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
435 if (irq >= s->num_irq)
436 goto bad_reg;
437 if (irq < 16)
438 value = 0xff;
439 for (i = 0; i < 8; i++) {
440 if (value & (1 << i)) {
441 int mask = (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq);
442 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
444 if (!GIC_TEST_ENABLED(irq + i, cm)) {
445 DPRINTF("Enabled IRQ %d\n", irq + i);
447 GIC_SET_ENABLED(irq + i, cm);
448 /* If a raised level triggered IRQ enabled then mark
449 is as pending. */
450 if (GIC_TEST_LEVEL(irq + i, mask)
451 && !GIC_TEST_TRIGGER(irq + i)) {
452 DPRINTF("Set %d pending mask %x\n", irq + i, mask);
453 GIC_SET_PENDING(irq + i, mask);
457 } else if (offset < 0x200) {
458 /* Interrupt Clear Enable. */
459 irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
460 if (irq >= s->num_irq)
461 goto bad_reg;
462 if (irq < 16)
463 value = 0;
464 for (i = 0; i < 8; i++) {
465 if (value & (1 << i)) {
466 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
468 if (GIC_TEST_ENABLED(irq + i, cm)) {
469 DPRINTF("Disabled IRQ %d\n", irq + i);
471 GIC_CLEAR_ENABLED(irq + i, cm);
474 } else if (offset < 0x280) {
475 /* Interrupt Set Pending. */
476 irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
477 if (irq >= s->num_irq)
478 goto bad_reg;
479 if (irq < 16)
480 irq = 0;
482 for (i = 0; i < 8; i++) {
483 if (value & (1 << i)) {
484 GIC_SET_PENDING(irq + i, GIC_TARGET(irq));
487 } else if (offset < 0x300) {
488 /* Interrupt Clear Pending. */
489 irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
490 if (irq >= s->num_irq)
491 goto bad_reg;
492 for (i = 0; i < 8; i++) {
493 /* ??? This currently clears the pending bit for all CPUs, even
494 for per-CPU interrupts. It's unclear whether this is the
495 corect behavior. */
496 if (value & (1 << i)) {
497 GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
500 } else if (offset < 0x400) {
501 /* Interrupt Active. */
502 goto bad_reg;
503 } else if (offset < 0x800) {
504 /* Interrupt Priority. */
505 irq = (offset - 0x400) + GIC_BASE_IRQ;
506 if (irq >= s->num_irq)
507 goto bad_reg;
508 if (irq < GIC_INTERNAL) {
509 s->priority1[irq][cpu] = value;
510 } else {
511 s->priority2[irq - GIC_INTERNAL] = value;
513 #ifndef NVIC
514 } else if (offset < 0xc00) {
515 /* Interrupt CPU Target. */
516 irq = (offset - 0x800) + GIC_BASE_IRQ;
517 if (irq >= s->num_irq)
518 goto bad_reg;
519 if (irq < 29)
520 value = 0;
521 else if (irq < GIC_INTERNAL)
522 value = ALL_CPU_MASK;
523 s->irq_target[irq] = value & ALL_CPU_MASK;
524 } else if (offset < 0xf00) {
525 /* Interrupt Configuration. */
526 irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
527 if (irq >= s->num_irq)
528 goto bad_reg;
529 if (irq < GIC_INTERNAL)
530 value |= 0xaa;
531 for (i = 0; i < 4; i++) {
532 if (value & (1 << (i * 2))) {
533 GIC_SET_MODEL(irq + i);
534 } else {
535 GIC_CLEAR_MODEL(irq + i);
537 if (value & (2 << (i * 2))) {
538 GIC_SET_TRIGGER(irq + i);
539 } else {
540 GIC_CLEAR_TRIGGER(irq + i);
543 #endif
544 } else {
545 /* 0xf00 is only handled for 32-bit writes. */
546 goto bad_reg;
548 gic_update(s);
549 return;
550 bad_reg:
551 hw_error("gic_dist_writeb: Bad offset %x\n", (int)offset);
554 static void gic_dist_writew(void *opaque, target_phys_addr_t offset,
555 uint32_t value)
557 gic_dist_writeb(opaque, offset, value & 0xff);
558 gic_dist_writeb(opaque, offset + 1, value >> 8);
561 static void gic_dist_writel(void *opaque, target_phys_addr_t offset,
562 uint32_t value)
564 gic_state *s = (gic_state *)opaque;
565 #ifdef NVIC
566 uint32_t addr;
567 addr = offset;
568 if (addr < 0x100 || (addr > 0xd00 && addr != 0xf00)) {
569 nvic_writel(s, addr, value);
570 return;
572 #endif
573 if (offset == 0xf00) {
574 int cpu;
575 int irq;
576 int mask;
578 cpu = gic_get_current_cpu();
579 irq = value & 0x3ff;
580 switch ((value >> 24) & 3) {
581 case 0:
582 mask = (value >> 16) & ALL_CPU_MASK;
583 break;
584 case 1:
585 mask = ALL_CPU_MASK ^ (1 << cpu);
586 break;
587 case 2:
588 mask = 1 << cpu;
589 break;
590 default:
591 DPRINTF("Bad Soft Int target filter\n");
592 mask = ALL_CPU_MASK;
593 break;
595 GIC_SET_PENDING(irq, mask);
596 gic_update(s);
597 return;
599 gic_dist_writew(opaque, offset, value & 0xffff);
600 gic_dist_writew(opaque, offset + 2, value >> 16);
603 static const MemoryRegionOps gic_dist_ops = {
604 .old_mmio = {
605 .read = { gic_dist_readb, gic_dist_readw, gic_dist_readl, },
606 .write = { gic_dist_writeb, gic_dist_writew, gic_dist_writel, },
608 .endianness = DEVICE_NATIVE_ENDIAN,
611 #ifndef NVIC
612 static uint32_t gic_cpu_read(gic_state *s, int cpu, int offset)
614 switch (offset) {
615 case 0x00: /* Control */
616 return s->cpu_enabled[cpu];
617 case 0x04: /* Priority mask */
618 return s->priority_mask[cpu];
619 case 0x08: /* Binary Point */
620 /* ??? Not implemented. */
621 return 0;
622 case 0x0c: /* Acknowledge */
623 return gic_acknowledge_irq(s, cpu);
624 case 0x14: /* Running Priority */
625 return s->running_priority[cpu];
626 case 0x18: /* Highest Pending Interrupt */
627 return s->current_pending[cpu];
628 default:
629 hw_error("gic_cpu_read: Bad offset %x\n", (int)offset);
630 return 0;
634 static void gic_cpu_write(gic_state *s, int cpu, int offset, uint32_t value)
636 switch (offset) {
637 case 0x00: /* Control */
638 s->cpu_enabled[cpu] = (value & 1);
639 DPRINTF("CPU %d %sabled\n", cpu, s->cpu_enabled ? "En" : "Dis");
640 break;
641 case 0x04: /* Priority mask */
642 s->priority_mask[cpu] = (value & 0xff);
643 break;
644 case 0x08: /* Binary Point */
645 /* ??? Not implemented. */
646 break;
647 case 0x10: /* End Of Interrupt */
648 return gic_complete_irq(s, cpu, value & 0x3ff);
649 default:
650 hw_error("gic_cpu_write: Bad offset %x\n", (int)offset);
651 return;
653 gic_update(s);
656 /* Wrappers to read/write the GIC CPU interface for the current CPU */
657 static uint64_t gic_thiscpu_read(void *opaque, target_phys_addr_t addr,
658 unsigned size)
660 gic_state *s = (gic_state *)opaque;
661 return gic_cpu_read(s, gic_get_current_cpu(), addr);
664 static void gic_thiscpu_write(void *opaque, target_phys_addr_t addr,
665 uint64_t value, unsigned size)
667 gic_state *s = (gic_state *)opaque;
668 gic_cpu_write(s, gic_get_current_cpu(), addr, value);
671 /* Wrappers to read/write the GIC CPU interface for a specific CPU.
672 * These just decode the opaque pointer into gic_state* + cpu id.
674 static uint64_t gic_do_cpu_read(void *opaque, target_phys_addr_t addr,
675 unsigned size)
677 gic_state **backref = (gic_state **)opaque;
678 gic_state *s = *backref;
679 int id = (backref - s->backref);
680 return gic_cpu_read(s, id, addr);
683 static void gic_do_cpu_write(void *opaque, target_phys_addr_t addr,
684 uint64_t value, unsigned size)
686 gic_state **backref = (gic_state **)opaque;
687 gic_state *s = *backref;
688 int id = (backref - s->backref);
689 gic_cpu_write(s, id, addr, value);
692 static const MemoryRegionOps gic_thiscpu_ops = {
693 .read = gic_thiscpu_read,
694 .write = gic_thiscpu_write,
695 .endianness = DEVICE_NATIVE_ENDIAN,
698 static const MemoryRegionOps gic_cpu_ops = {
699 .read = gic_do_cpu_read,
700 .write = gic_do_cpu_write,
701 .endianness = DEVICE_NATIVE_ENDIAN,
703 #endif
705 static void gic_reset(gic_state *s)
707 int i;
708 memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state));
709 for (i = 0 ; i < NUM_CPU(s); i++) {
710 s->priority_mask[i] = 0xf0;
711 s->current_pending[i] = 1023;
712 s->running_irq[i] = 1023;
713 s->running_priority[i] = 0x100;
714 #ifdef NVIC
715 /* The NVIC doesn't have per-cpu interfaces, so enable by default. */
716 s->cpu_enabled[i] = 1;
717 #else
718 s->cpu_enabled[i] = 0;
719 #endif
721 for (i = 0; i < 16; i++) {
722 GIC_SET_ENABLED(i, ALL_CPU_MASK);
723 GIC_SET_TRIGGER(i);
725 #ifdef NVIC
726 /* The NVIC is always enabled. */
727 s->enabled = 1;
728 #else
729 s->enabled = 0;
730 #endif
733 static void gic_save(QEMUFile *f, void *opaque)
735 gic_state *s = (gic_state *)opaque;
736 int i;
737 int j;
739 qemu_put_be32(f, s->enabled);
740 for (i = 0; i < NUM_CPU(s); i++) {
741 qemu_put_be32(f, s->cpu_enabled[i]);
742 for (j = 0; j < GIC_INTERNAL; j++)
743 qemu_put_be32(f, s->priority1[j][i]);
744 for (j = 0; j < s->num_irq; j++)
745 qemu_put_be32(f, s->last_active[j][i]);
746 qemu_put_be32(f, s->priority_mask[i]);
747 qemu_put_be32(f, s->running_irq[i]);
748 qemu_put_be32(f, s->running_priority[i]);
749 qemu_put_be32(f, s->current_pending[i]);
751 for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
752 qemu_put_be32(f, s->priority2[i]);
754 for (i = 0; i < s->num_irq; i++) {
755 #ifndef NVIC
756 qemu_put_be32(f, s->irq_target[i]);
757 #endif
758 qemu_put_byte(f, s->irq_state[i].enabled);
759 qemu_put_byte(f, s->irq_state[i].pending);
760 qemu_put_byte(f, s->irq_state[i].active);
761 qemu_put_byte(f, s->irq_state[i].level);
762 qemu_put_byte(f, s->irq_state[i].model);
763 qemu_put_byte(f, s->irq_state[i].trigger);
767 static int gic_load(QEMUFile *f, void *opaque, int version_id)
769 gic_state *s = (gic_state *)opaque;
770 int i;
771 int j;
773 if (version_id != 2)
774 return -EINVAL;
776 s->enabled = qemu_get_be32(f);
777 for (i = 0; i < NUM_CPU(s); i++) {
778 s->cpu_enabled[i] = qemu_get_be32(f);
779 for (j = 0; j < GIC_INTERNAL; j++)
780 s->priority1[j][i] = qemu_get_be32(f);
781 for (j = 0; j < s->num_irq; j++)
782 s->last_active[j][i] = qemu_get_be32(f);
783 s->priority_mask[i] = qemu_get_be32(f);
784 s->running_irq[i] = qemu_get_be32(f);
785 s->running_priority[i] = qemu_get_be32(f);
786 s->current_pending[i] = qemu_get_be32(f);
788 for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
789 s->priority2[i] = qemu_get_be32(f);
791 for (i = 0; i < s->num_irq; i++) {
792 #ifndef NVIC
793 s->irq_target[i] = qemu_get_be32(f);
794 #endif
795 s->irq_state[i].enabled = qemu_get_byte(f);
796 s->irq_state[i].pending = qemu_get_byte(f);
797 s->irq_state[i].active = qemu_get_byte(f);
798 s->irq_state[i].level = qemu_get_byte(f);
799 s->irq_state[i].model = qemu_get_byte(f);
800 s->irq_state[i].trigger = qemu_get_byte(f);
803 return 0;
806 #if NCPU > 1
807 static void gic_init(gic_state *s, int num_cpu, int num_irq)
808 #else
809 static void gic_init(gic_state *s, int num_irq)
810 #endif
812 int i;
814 #if NCPU > 1
815 s->num_cpu = num_cpu;
816 #endif
817 s->num_irq = num_irq + GIC_BASE_IRQ;
818 if (s->num_irq > GIC_MAXIRQ) {
819 hw_error("requested %u interrupt lines exceeds GIC maximum %d\n",
820 num_irq, GIC_MAXIRQ);
822 /* ITLinesNumber is represented as (N / 32) - 1 (see
823 * gic_dist_readb) so this is an implementation imposed
824 * restriction, not an architectural one:
826 if (s->num_irq < 32 || (s->num_irq % 32)) {
827 hw_error("%d interrupt lines unsupported: not divisible by 32\n",
828 num_irq);
831 qdev_init_gpio_in(&s->busdev.qdev, gic_set_irq, s->num_irq - GIC_INTERNAL);
832 for (i = 0; i < NUM_CPU(s); i++) {
833 sysbus_init_irq(&s->busdev, &s->parent_irq[i]);
835 memory_region_init_io(&s->iomem, &gic_dist_ops, s, "gic_dist", 0x1000);
836 #ifndef NVIC
837 /* Memory regions for the CPU interfaces (NVIC doesn't have these):
838 * a region for "CPU interface for this core", then a region for
839 * "CPU interface for core 0", "for core 1", ...
840 * NB that the memory region size of 0x100 applies for the 11MPCore
841 * and also cores following the GIC v1 spec (ie A9).
842 * GIC v2 defines a larger memory region (0x1000) so this will need
843 * to be extended when we implement A15.
845 memory_region_init_io(&s->cpuiomem[0], &gic_thiscpu_ops, s,
846 "gic_cpu", 0x100);
847 for (i = 0; i < NUM_CPU(s); i++) {
848 s->backref[i] = s;
849 memory_region_init_io(&s->cpuiomem[i+1], &gic_cpu_ops, &s->backref[i],
850 "gic_cpu", 0x100);
852 #endif
854 gic_reset(s);
855 register_savevm(NULL, "arm_gic", -1, 2, gic_save, gic_load, s);