/*
 * ARM GICv3 support - common bits of emulated and KVM kernel model
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "qemu/module.h"
27 #include "hw/core/cpu.h"
28 #include "hw/intc/arm_gicv3_common.h"
29 #include "hw/qdev-properties.h"
30 #include "migration/vmstate.h"
31 #include "gicv3_internal.h"
32 #include "hw/arm/linux-boot-if.h"
33 #include "sysemu/kvm.h"
36 static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State
*cs
)
38 if (cs
->gicd_no_migration_shift_bug
) {
42 /* Older versions of QEMU had a bug in the handling of state save/restore
43 * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
44 * so that instead of the data for external interrupts 32 and up
45 * starting at bit position 32 in the bitmap, it started at bit
46 * position 64. If we're receiving data from a QEMU with that bug,
47 * we must move the data down into the right place.
49 memmove(cs
->group
, (uint8_t *)cs
->group
+ GIC_INTERNAL
/ 8,
50 sizeof(cs
->group
) - GIC_INTERNAL
/ 8);
51 memmove(cs
->grpmod
, (uint8_t *)cs
->grpmod
+ GIC_INTERNAL
/ 8,
52 sizeof(cs
->grpmod
) - GIC_INTERNAL
/ 8);
53 memmove(cs
->enabled
, (uint8_t *)cs
->enabled
+ GIC_INTERNAL
/ 8,
54 sizeof(cs
->enabled
) - GIC_INTERNAL
/ 8);
55 memmove(cs
->pending
, (uint8_t *)cs
->pending
+ GIC_INTERNAL
/ 8,
56 sizeof(cs
->pending
) - GIC_INTERNAL
/ 8);
57 memmove(cs
->active
, (uint8_t *)cs
->active
+ GIC_INTERNAL
/ 8,
58 sizeof(cs
->active
) - GIC_INTERNAL
/ 8);
59 memmove(cs
->edge_trigger
, (uint8_t *)cs
->edge_trigger
+ GIC_INTERNAL
/ 8,
60 sizeof(cs
->edge_trigger
) - GIC_INTERNAL
/ 8);
63 * While this new version QEMU doesn't have this kind of bug as we fix it,
64 * so it needs to set the flag to true to indicate that and it's necessary
65 * for next migration to work from this new version QEMU.
67 cs
->gicd_no_migration_shift_bug
= true;
70 static int gicv3_pre_save(void *opaque
)
72 GICv3State
*s
= (GICv3State
*)opaque
;
73 ARMGICv3CommonClass
*c
= ARM_GICV3_COMMON_GET_CLASS(s
);
82 static int gicv3_post_load(void *opaque
, int version_id
)
84 GICv3State
*s
= (GICv3State
*)opaque
;
85 ARMGICv3CommonClass
*c
= ARM_GICV3_COMMON_GET_CLASS(s
);
87 gicv3_gicd_no_migration_shift_bug_post_load(s
);
95 static bool virt_state_needed(void *opaque
)
97 GICv3CPUState
*cs
= opaque
;
99 return cs
->num_list_regs
!= 0;
102 static const VMStateDescription vmstate_gicv3_cpu_virt
= {
103 .name
= "arm_gicv3_cpu/virt",
105 .minimum_version_id
= 1,
106 .needed
= virt_state_needed
,
107 .fields
= (VMStateField
[]) {
108 VMSTATE_UINT64_2DARRAY(ich_apr
, GICv3CPUState
, 3, 4),
109 VMSTATE_UINT64(ich_hcr_el2
, GICv3CPUState
),
110 VMSTATE_UINT64_ARRAY(ich_lr_el2
, GICv3CPUState
, GICV3_LR_MAX
),
111 VMSTATE_UINT64(ich_vmcr_el2
, GICv3CPUState
),
112 VMSTATE_END_OF_LIST()
116 static int vmstate_gicv3_cpu_pre_load(void *opaque
)
118 GICv3CPUState
*cs
= opaque
;
121 * If the sre_el1 subsection is not transferred this
122 * means SRE_EL1 is 0x7 (which might not be the same as
125 cs
->icc_sre_el1
= 0x7;
129 static bool icc_sre_el1_reg_needed(void *opaque
)
131 GICv3CPUState
*cs
= opaque
;
133 return cs
->icc_sre_el1
!= 7;
136 const VMStateDescription vmstate_gicv3_cpu_sre_el1
= {
137 .name
= "arm_gicv3_cpu/sre_el1",
139 .minimum_version_id
= 1,
140 .needed
= icc_sre_el1_reg_needed
,
141 .fields
= (VMStateField
[]) {
142 VMSTATE_UINT64(icc_sre_el1
, GICv3CPUState
),
143 VMSTATE_END_OF_LIST()
147 static const VMStateDescription vmstate_gicv3_cpu
= {
148 .name
= "arm_gicv3_cpu",
150 .minimum_version_id
= 1,
151 .pre_load
= vmstate_gicv3_cpu_pre_load
,
152 .fields
= (VMStateField
[]) {
153 VMSTATE_UINT32(level
, GICv3CPUState
),
154 VMSTATE_UINT32(gicr_ctlr
, GICv3CPUState
),
155 VMSTATE_UINT32_ARRAY(gicr_statusr
, GICv3CPUState
, 2),
156 VMSTATE_UINT32(gicr_waker
, GICv3CPUState
),
157 VMSTATE_UINT64(gicr_propbaser
, GICv3CPUState
),
158 VMSTATE_UINT64(gicr_pendbaser
, GICv3CPUState
),
159 VMSTATE_UINT32(gicr_igroupr0
, GICv3CPUState
),
160 VMSTATE_UINT32(gicr_ienabler0
, GICv3CPUState
),
161 VMSTATE_UINT32(gicr_ipendr0
, GICv3CPUState
),
162 VMSTATE_UINT32(gicr_iactiver0
, GICv3CPUState
),
163 VMSTATE_UINT32(edge_trigger
, GICv3CPUState
),
164 VMSTATE_UINT32(gicr_igrpmodr0
, GICv3CPUState
),
165 VMSTATE_UINT32(gicr_nsacr
, GICv3CPUState
),
166 VMSTATE_UINT8_ARRAY(gicr_ipriorityr
, GICv3CPUState
, GIC_INTERNAL
),
167 VMSTATE_UINT64_ARRAY(icc_ctlr_el1
, GICv3CPUState
, 2),
168 VMSTATE_UINT64(icc_pmr_el1
, GICv3CPUState
),
169 VMSTATE_UINT64_ARRAY(icc_bpr
, GICv3CPUState
, 3),
170 VMSTATE_UINT64_2DARRAY(icc_apr
, GICv3CPUState
, 3, 4),
171 VMSTATE_UINT64_ARRAY(icc_igrpen
, GICv3CPUState
, 3),
172 VMSTATE_UINT64(icc_ctlr_el3
, GICv3CPUState
),
173 VMSTATE_END_OF_LIST()
175 .subsections
= (const VMStateDescription
* []) {
176 &vmstate_gicv3_cpu_virt
,
177 &vmstate_gicv3_cpu_sre_el1
,
182 static int gicv3_pre_load(void *opaque
)
184 GICv3State
*cs
= opaque
;
187 * The gicd_no_migration_shift_bug flag is used for migration compatibility
188 * for old version QEMU which may have the GICD bmp shift bug under KVM mode.
189 * Strictly, what we want to know is whether the migration source is using
190 * KVM. Since we don't have any way to determine that, we look at whether the
191 * destination is using KVM; this is close enough because for the older QEMU
192 * versions with this bug KVM -> TCG migration didn't work anyway. If the
193 * source is a newer QEMU without this bug it will transmit the migration
194 * subsection which sets the flag to true; otherwise it will remain set to
195 * the value we select here.
198 cs
->gicd_no_migration_shift_bug
= false;
/*
 * Subsection .needed predicate that always requests transmission: the
 * gicd_no_migration_shift_bug subsection must be sent on every migration so
 * the destination knows the source does not have the shift bug.
 * FIX: the body ("return true;") was missing.
 */
static bool needed_always(void *opaque)
{
    return true;
}
209 const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug
= {
210 .name
= "arm_gicv3/gicd_no_migration_shift_bug",
212 .minimum_version_id
= 1,
213 .needed
= needed_always
,
214 .fields
= (VMStateField
[]) {
215 VMSTATE_BOOL(gicd_no_migration_shift_bug
, GICv3State
),
216 VMSTATE_END_OF_LIST()
220 static const VMStateDescription vmstate_gicv3
= {
223 .minimum_version_id
= 1,
224 .pre_load
= gicv3_pre_load
,
225 .pre_save
= gicv3_pre_save
,
226 .post_load
= gicv3_post_load
,
227 .priority
= MIG_PRI_GICV3
,
228 .fields
= (VMStateField
[]) {
229 VMSTATE_UINT32(gicd_ctlr
, GICv3State
),
230 VMSTATE_UINT32_ARRAY(gicd_statusr
, GICv3State
, 2),
231 VMSTATE_UINT32_ARRAY(group
, GICv3State
, GICV3_BMP_SIZE
),
232 VMSTATE_UINT32_ARRAY(grpmod
, GICv3State
, GICV3_BMP_SIZE
),
233 VMSTATE_UINT32_ARRAY(enabled
, GICv3State
, GICV3_BMP_SIZE
),
234 VMSTATE_UINT32_ARRAY(pending
, GICv3State
, GICV3_BMP_SIZE
),
235 VMSTATE_UINT32_ARRAY(active
, GICv3State
, GICV3_BMP_SIZE
),
236 VMSTATE_UINT32_ARRAY(level
, GICv3State
, GICV3_BMP_SIZE
),
237 VMSTATE_UINT32_ARRAY(edge_trigger
, GICv3State
, GICV3_BMP_SIZE
),
238 VMSTATE_UINT8_ARRAY(gicd_ipriority
, GICv3State
, GICV3_MAXIRQ
),
239 VMSTATE_UINT64_ARRAY(gicd_irouter
, GICv3State
, GICV3_MAXIRQ
),
240 VMSTATE_UINT32_ARRAY(gicd_nsacr
, GICv3State
,
241 DIV_ROUND_UP(GICV3_MAXIRQ
, 16)),
242 VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu
, GICv3State
, num_cpu
,
243 vmstate_gicv3_cpu
, GICv3CPUState
),
244 VMSTATE_END_OF_LIST()
246 .subsections
= (const VMStateDescription
* []) {
247 &vmstate_gicv3_gicd_no_migration_shift_bug
,
252 void gicv3_init_irqs_and_mmio(GICv3State
*s
, qemu_irq_handler handler
,
253 const MemoryRegionOps
*ops
)
255 SysBusDevice
*sbd
= SYS_BUS_DEVICE(s
);
259 /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
260 * GPIO array layout is thus:
262 * [N..N+31] PPIs for CPU 0
263 * [N+32..N+63] PPIs for CPU 1
266 i
= s
->num_irq
- GIC_INTERNAL
+ GIC_INTERNAL
* s
->num_cpu
;
267 qdev_init_gpio_in(DEVICE(s
), handler
, i
);
269 for (i
= 0; i
< s
->num_cpu
; i
++) {
270 sysbus_init_irq(sbd
, &s
->cpu
[i
].parent_irq
);
272 for (i
= 0; i
< s
->num_cpu
; i
++) {
273 sysbus_init_irq(sbd
, &s
->cpu
[i
].parent_fiq
);
275 for (i
= 0; i
< s
->num_cpu
; i
++) {
276 sysbus_init_irq(sbd
, &s
->cpu
[i
].parent_virq
);
278 for (i
= 0; i
< s
->num_cpu
; i
++) {
279 sysbus_init_irq(sbd
, &s
->cpu
[i
].parent_vfiq
);
282 memory_region_init_io(&s
->iomem_dist
, OBJECT(s
), ops
, s
,
283 "gicv3_dist", 0x10000);
284 sysbus_init_mmio(sbd
, &s
->iomem_dist
);
286 s
->redist_regions
= g_new0(GICv3RedistRegion
, s
->nb_redist_regions
);
288 for (i
= 0; i
< s
->nb_redist_regions
; i
++) {
289 char *name
= g_strdup_printf("gicv3_redist_region[%d]", i
);
290 GICv3RedistRegion
*region
= &s
->redist_regions
[i
];
293 region
->cpuidx
= cpuidx
;
294 cpuidx
+= s
->redist_region_count
[i
];
296 memory_region_init_io(®ion
->iomem
, OBJECT(s
),
297 ops
? &ops
[1] : NULL
, region
, name
,
298 s
->redist_region_count
[i
] * GICV3_REDIST_SIZE
);
299 sysbus_init_mmio(sbd
, ®ion
->iomem
);
304 static void arm_gicv3_common_realize(DeviceState
*dev
, Error
**errp
)
306 GICv3State
*s
= ARM_GICV3_COMMON(dev
);
307 int i
, rdist_capacity
, cpuidx
;
309 /* revision property is actually reserved and currently used only in order
310 * to keep the interface compatible with GICv2 code, avoiding extra
311 * conditions. However, in future it could be used, for example, if we
314 if (s
->revision
!= 3) {
315 error_setg(errp
, "unsupported GIC revision %d", s
->revision
);
319 if (s
->num_irq
> GICV3_MAXIRQ
) {
321 "requested %u interrupt lines exceeds GIC maximum %d",
322 s
->num_irq
, GICV3_MAXIRQ
);
325 if (s
->num_irq
< GIC_INTERNAL
) {
327 "requested %u interrupt lines is below GIC minimum %d",
328 s
->num_irq
, GIC_INTERNAL
);
332 /* ITLinesNumber is represented as (N / 32) - 1, so this is an
333 * implementation imposed restriction, not an architectural one,
334 * so we don't have to deal with bitfields where only some of the
335 * bits in a 32-bit word should be valid.
337 if (s
->num_irq
% 32) {
339 "%d interrupt lines unsupported: not divisible by 32",
344 if (s
->lpi_enable
&& !s
->dma
) {
345 error_setg(errp
, "Redist-ITS: Guest 'sysmem' reference link not set");
350 for (i
= 0; i
< s
->nb_redist_regions
; i
++) {
351 rdist_capacity
+= s
->redist_region_count
[i
];
353 if (rdist_capacity
< s
->num_cpu
) {
354 error_setg(errp
, "Capacity of the redist regions(%d) "
355 "is less than number of vcpus(%d)",
356 rdist_capacity
, s
->num_cpu
);
361 address_space_init(&s
->dma_as
, s
->dma
,
365 s
->cpu
= g_new0(GICv3CPUState
, s
->num_cpu
);
367 for (i
= 0; i
< s
->num_cpu
; i
++) {
368 CPUState
*cpu
= qemu_get_cpu(i
);
373 /* Store GICv3CPUState in CPUARMState gicv3state pointer */
374 gicv3_set_gicv3state(cpu
, &s
->cpu
[i
]);
376 /* Pre-construct the GICR_TYPER:
377 * For our implementation:
378 * Top 32 bits are the affinity value of the associated CPU
379 * CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
380 * Processor_Number == CPU index starting from 0
381 * DPGS == 0 (GICR_CTLR.DPG* not supported)
382 * Last == 1 if this is the last redistributor in a series of
383 * contiguous redistributor pages
384 * DirectLPI == 0 (direct injection of LPIs not supported)
385 * VLPIS == 0 (virtual LPIs not supported)
386 * PLPIS == 0 (physical LPIs not supported)
388 cpu_affid
= object_property_get_uint(OBJECT(cpu
), "mp-affinity", NULL
);
390 /* The CPU mp-affinity property is in MPIDR register format; squash
391 * the affinity bytes into 32 bits as the GICR_TYPER has them.
393 cpu_affid
= ((cpu_affid
& 0xFF00000000ULL
) >> 8) |
394 (cpu_affid
& 0xFFFFFF);
395 s
->cpu
[i
].gicr_typer
= (cpu_affid
<< 32) |
400 s
->cpu
[i
].gicr_typer
|= GICR_TYPER_PLPIS
;
405 * Now go through and set GICR_TYPER.Last for the final
406 * redistributor in each region.
409 for (i
= 0; i
< s
->nb_redist_regions
; i
++) {
410 cpuidx
+= s
->redist_region_count
[i
];
411 s
->cpu
[cpuidx
- 1].gicr_typer
|= GICR_TYPER_LAST
;
415 static void arm_gicv3_finalize(Object
*obj
)
417 GICv3State
*s
= ARM_GICV3_COMMON(obj
);
419 g_free(s
->redist_region_count
);
422 static void arm_gicv3_common_reset(DeviceState
*dev
)
424 GICv3State
*s
= ARM_GICV3_COMMON(dev
);
427 for (i
= 0; i
< s
->num_cpu
; i
++) {
428 GICv3CPUState
*cs
= &s
->cpu
[i
];
433 /* Our implementation supports clearing GICR_CTLR.EnableLPIs */
434 cs
->gicr_ctlr
|= GICR_CTLR_CES
;
436 cs
->gicr_statusr
[GICV3_S
] = 0;
437 cs
->gicr_statusr
[GICV3_NS
] = 0;
438 cs
->gicr_waker
= GICR_WAKER_ProcessorSleep
| GICR_WAKER_ChildrenAsleep
;
439 cs
->gicr_propbaser
= 0;
440 cs
->gicr_pendbaser
= 0;
441 /* If we're resetting a TZ-aware GIC as if secure firmware
442 * had set it up ready to start a kernel in non-secure, we
443 * need to set interrupts to group 1 so the kernel can use them.
444 * Otherwise they reset to group 0 like the hardware.
446 if (s
->irq_reset_nonsecure
) {
447 cs
->gicr_igroupr0
= 0xffffffff;
449 cs
->gicr_igroupr0
= 0;
452 cs
->gicr_ienabler0
= 0;
453 cs
->gicr_ipendr0
= 0;
454 cs
->gicr_iactiver0
= 0;
455 cs
->edge_trigger
= 0xffff;
456 cs
->gicr_igrpmodr0
= 0;
458 memset(cs
->gicr_ipriorityr
, 0, sizeof(cs
->gicr_ipriorityr
));
460 cs
->hppi
.prio
= 0xff;
461 cs
->hpplpi
.prio
= 0xff;
463 /* State in the CPU interface must *not* be reset here, because it
464 * is part of the CPU's reset domain, not the GIC device's.
468 /* For our implementation affinity routing is always enabled */
469 if (s
->security_extn
) {
470 s
->gicd_ctlr
= GICD_CTLR_ARE_S
| GICD_CTLR_ARE_NS
;
472 s
->gicd_ctlr
= GICD_CTLR_DS
| GICD_CTLR_ARE
;
475 s
->gicd_statusr
[GICV3_S
] = 0;
476 s
->gicd_statusr
[GICV3_NS
] = 0;
478 memset(s
->group
, 0, sizeof(s
->group
));
479 memset(s
->grpmod
, 0, sizeof(s
->grpmod
));
480 memset(s
->enabled
, 0, sizeof(s
->enabled
));
481 memset(s
->pending
, 0, sizeof(s
->pending
));
482 memset(s
->active
, 0, sizeof(s
->active
));
483 memset(s
->level
, 0, sizeof(s
->level
));
484 memset(s
->edge_trigger
, 0, sizeof(s
->edge_trigger
));
485 memset(s
->gicd_ipriority
, 0, sizeof(s
->gicd_ipriority
));
486 memset(s
->gicd_irouter
, 0, sizeof(s
->gicd_irouter
));
487 memset(s
->gicd_nsacr
, 0, sizeof(s
->gicd_nsacr
));
488 /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
489 * write these to get sane behaviour and we need not populate the
490 * pointer cache here; however having the cache be different for
491 * "happened to be 0 from reset" and "guest wrote 0" would be
494 gicv3_cache_all_target_cpustates(s
);
496 if (s
->irq_reset_nonsecure
) {
497 /* If we're resetting a TZ-aware GIC as if secure firmware
498 * had set it up ready to start a kernel in non-secure, we
499 * need to set interrupts to group 1 so the kernel can use them.
500 * Otherwise they reset to group 0 like the hardware.
502 for (i
= GIC_INTERNAL
; i
< s
->num_irq
; i
++) {
503 gicv3_gicd_group_set(s
, i
);
506 s
->gicd_no_migration_shift_bug
= true;
509 static void arm_gic_common_linux_init(ARMLinuxBootIf
*obj
,
512 GICv3State
*s
= ARM_GICV3_COMMON(obj
);
514 if (s
->security_extn
&& !secure_boot
) {
515 /* We're directly booting a kernel into NonSecure. If this GIC
516 * implements the security extensions then we must configure it
517 * to have all the interrupts be NonSecure (this is a job that
518 * is done by the Secure boot firmware in real hardware, and in
519 * this mode QEMU is acting as a minimalist firmware-and-bootloader
522 s
->irq_reset_nonsecure
= true;
526 static Property arm_gicv3_common_properties
[] = {
527 DEFINE_PROP_UINT32("num-cpu", GICv3State
, num_cpu
, 1),
528 DEFINE_PROP_UINT32("num-irq", GICv3State
, num_irq
, 32),
529 DEFINE_PROP_UINT32("revision", GICv3State
, revision
, 3),
530 DEFINE_PROP_BOOL("has-lpi", GICv3State
, lpi_enable
, 0),
531 DEFINE_PROP_BOOL("has-security-extensions", GICv3State
, security_extn
, 0),
532 DEFINE_PROP_ARRAY("redist-region-count", GICv3State
, nb_redist_regions
,
533 redist_region_count
, qdev_prop_uint32
, uint32_t),
534 DEFINE_PROP_LINK("sysmem", GICv3State
, dma
, TYPE_MEMORY_REGION
,
536 DEFINE_PROP_END_OF_LIST(),
539 static void arm_gicv3_common_class_init(ObjectClass
*klass
, void *data
)
541 DeviceClass
*dc
= DEVICE_CLASS(klass
);
542 ARMLinuxBootIfClass
*albifc
= ARM_LINUX_BOOT_IF_CLASS(klass
);
544 dc
->reset
= arm_gicv3_common_reset
;
545 dc
->realize
= arm_gicv3_common_realize
;
546 device_class_set_props(dc
, arm_gicv3_common_properties
);
547 dc
->vmsd
= &vmstate_gicv3
;
548 albifc
->arm_linux_init
= arm_gic_common_linux_init
;
551 static const TypeInfo arm_gicv3_common_type
= {
552 .name
= TYPE_ARM_GICV3_COMMON
,
553 .parent
= TYPE_SYS_BUS_DEVICE
,
554 .instance_size
= sizeof(GICv3State
),
555 .class_size
= sizeof(ARMGICv3CommonClass
),
556 .class_init
= arm_gicv3_common_class_init
,
557 .instance_finalize
= arm_gicv3_finalize
,
559 .interfaces
= (InterfaceInfo
[]) {
560 { TYPE_ARM_LINUX_BOOT_IF
},
565 static void register_types(void)
567 type_register_static(&arm_gicv3_common_type
);
570 type_init(register_types
)