/*
 * ARM GICv3 support - common bits of emulated and KVM kernel model
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/core/cpu.h"
#include "hw/intc/arm_gicv3_common.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "gicv3_internal.h"
#include "hw/arm/linux-boot-if.h"
#include "sysemu/kvm.h"
static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
{
    if (cs->gicd_no_migration_shift_bug) {
        return;
    }

    /* Older versions of QEMU had a bug in the handling of state save/restore
     * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
     * so that instead of the data for external interrupts 32 and up
     * starting at bit position 32 in the bitmap, it started at bit
     * position 64. If we're receiving data from a QEMU with that bug,
     * we must move the data down into the right place.
     */
    memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8,
            sizeof(cs->group) - GIC_INTERNAL / 8);
    memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8,
            sizeof(cs->grpmod) - GIC_INTERNAL / 8);
    memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8,
            sizeof(cs->enabled) - GIC_INTERNAL / 8);
    memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8,
            sizeof(cs->pending) - GIC_INTERNAL / 8);
    memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8,
            sizeof(cs->active) - GIC_INTERNAL / 8);
    memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8,
            sizeof(cs->edge_trigger) - GIC_INTERNAL / 8);
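
    /*
     * GIC_INTERNAL is 32, so GIC_INTERNAL / 8 is 4 bytes: exactly one
     * 32-bit bitmap word. Each memmove above shifts the data down by that
     * one word, so the word describing interrupts 32..63 moves from bit
     * offset 64 (where the buggy sender put it) back to bit offset 32.
     */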
    /*
     * This version of QEMU does not have the shift bug, so set the flag
     * to true; it will be sent in the migration stream so that a future
     * migration from this QEMU is handled correctly.
     */
    cs->gicd_no_migration_shift_bug = true;
}
static int gicv3_pre_save(void *opaque)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    if (c->pre_save) {
        c->pre_save(s);
    }

    return 0;
}
static int gicv3_post_load(void *opaque, int version_id)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    gicv3_gicd_no_migration_shift_bug_post_load(s);

    if (c->post_load) {
        c->post_load(s);
    }
    return 0;
}
static bool virt_state_needed(void *opaque)
{
    GICv3CPUState *cs = opaque;

    return cs->num_list_regs != 0;
}
static const VMStateDescription vmstate_gicv3_cpu_virt = {
    .name = "arm_gicv3_cpu/virt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
        VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
static int vmstate_gicv3_cpu_pre_load(void *opaque)
{
    GICv3CPUState *cs = opaque;

    /*
     * If the sre_el1 subsection is not transferred this
     * means SRE_EL1 is 0x7 (which might not be the same as
     * our reset value).
     */
    cs->icc_sre_el1 = 0x7;
    return 0;
}
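
/*
 * Note how the pre_load default pairs with icc_sre_el1_reg_needed() below:
 * a source whose ICC_SRE_EL1 is 0x7 simply omits the sre_el1 subsection and
 * the destination reconstructs that value here; any other value is sent
 * explicitly in the subsection and overwrites this default.
 */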
static bool icc_sre_el1_reg_needed(void *opaque)
{
    GICv3CPUState *cs = opaque;

    return cs->icc_sre_el1 != 7;
}
const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
    .name = "arm_gicv3_cpu/sre_el1",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icc_sre_el1_reg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
static bool gicv4_needed(void *opaque)
{
    GICv3CPUState *cs = opaque;

    return cs->gic->revision > 3;
}
const VMStateDescription vmstate_gicv3_gicv4 = {
    .name = "arm_gicv3_cpu/gicv4",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = gicv4_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(gicr_vpropbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_vpendbaser, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
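
/*
 * GICR_VPROPBASER and GICR_VPENDBASER only exist on GICv4, so this
 * subsection is skipped entirely when the device is a plain GICv3 and
 * the migration stream for GICv3 guests is unchanged by its presence.
 */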
static const VMStateDescription vmstate_gicv3_cpu = {
    .name = "arm_gicv3_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = vmstate_gicv3_cpu_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, GICv3CPUState),
        VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
        VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
        VMSTATE_UINT32(gicr_waker, GICv3CPUState),
        VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
        VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
        VMSTATE_UINT32(edge_trigger, GICv3CPUState),
        VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
        VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
        VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
        VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
        VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
        VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_cpu_virt,
        &vmstate_gicv3_cpu_sre_el1,
        &vmstate_gicv3_gicv4,
        NULL
    }
};
static int gicv3_pre_load(void *opaque)
{
    GICv3State *cs = opaque;

    /*
     * The gicd_no_migration_shift_bug flag is used for migration compatibility
     * with older QEMU versions which may have the GICD bitmap shift bug under
     * KVM mode. Strictly, what we want to know is whether the migration source
     * is using KVM. Since we don't have any way to determine that, we look at
     * whether the destination is using KVM; this is close enough because for
     * the older QEMU versions with this bug KVM -> TCG migration didn't work
     * anyway. If the source is a newer QEMU without this bug it will transmit
     * the migration subsection which sets the flag to true; otherwise it will
     * remain set to the value we select here.
     */
    if (kvm_enabled()) {
        cs->gicd_no_migration_shift_bug = false;
    }

    return 0;
}
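
/*
 * The possible combinations, as far as this file is concerned:
 *  - source is a fixed QEMU: it sends the gicd_no_migration_shift_bug
 *    subsection with the flag set to true, so no fixup is applied;
 *  - source is an old, buggy QEMU under KVM: no subsection arrives, the
 *    flag stays false (selected above), and post_load shifts the bitmaps;
 *  - destination is not using KVM: the flag keeps its reset value of true,
 *    matching the fact that the old KVM -> TCG migration never worked.
 */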
static bool needed_always(void *opaque)
{
    return true;
}

const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
    .name = "arm_gicv3/gicd_no_migration_shift_bug",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = needed_always,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State),
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_gicv3 = {
    .name = "arm_gicv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = gicv3_pre_load,
    .pre_save = gicv3_pre_save,
    .post_load = gicv3_post_load,
    .priority = MIG_PRI_GICV3,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gicd_ctlr, GICv3State),
        VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2),
        VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State,
                             DIV_ROUND_UP(GICV3_MAXIRQ, 16)),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu,
                                             vmstate_gicv3_cpu, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_gicd_no_migration_shift_bug,
        NULL
    }
};
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
                              const MemoryRegionOps *ops)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int i;
    int cpuidx;

    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] SPIs
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
    qdev_init_gpio_in(DEVICE(s), handler, i);
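
    /*
     * For example, with num_irq == 256 and num_cpu == 2 this registers
     * (256 - 32) + 32 * 2 == 288 GPIO inputs: 224 shared SPI lines
     * followed by one bank of 32 PPI lines per CPU.
     */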
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
    }

    memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
                          "gicv3_dist", 0x10000);
    sysbus_init_mmio(sbd, &s->iomem_dist);

    s->redist_regions = g_new0(GICv3RedistRegion, s->nb_redist_regions);
    cpuidx = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        char *name = g_strdup_printf("gicv3_redist_region[%d]", i);
        GICv3RedistRegion *region = &s->redist_regions[i];

        region->gic = s;
        region->cpuidx = cpuidx;
        cpuidx += s->redist_region_count[i];

        memory_region_init_io(&region->iomem, OBJECT(s),
                              ops ? &ops[1] : NULL, region, name,
                              s->redist_region_count[i] * gicv3_redist_size(s));
        sysbus_init_mmio(sbd, &region->iomem);
        g_free(name);
    }
}
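
/*
 * This helper is exported for the concrete GIC models (the emulated device
 * and the in-kernel KVM device) rather than used within this file: they are
 * expected to call it from their realize methods with their own
 * MemoryRegionOps, which may be NULL, as the "ops ? &ops[1] : NULL" check
 * above anticipates.
 */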
static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i, rdist_capacity, cpuidx;

    /*
     * This GIC device supports only revisions 3 and 4. The GICv1/v2
     * is a separate device.
     * Note that subclasses of this device may impose further restrictions
     * on the GIC revision: notably, the in-kernel KVM GIC doesn't
     * support GICv4.
     */
    if (s->revision != 3 && s->revision != 4) {
        error_setg(errp, "unsupported GIC revision %d", s->revision);
        return;
    }

    if (s->num_irq > GICV3_MAXIRQ) {
        error_setg(errp,
                   "requested %u interrupt lines exceeds GIC maximum %d",
                   s->num_irq, GICV3_MAXIRQ);
        return;
    }
    if (s->num_irq < GIC_INTERNAL) {
        error_setg(errp,
                   "requested %u interrupt lines is below GIC minimum %d",
                   s->num_irq, GIC_INTERNAL);
        return;
    }
    if (s->num_cpu == 0) {
        error_setg(errp, "num-cpu must be at least 1");
        return;
    }

    /* ITLinesNumber is represented as (N / 32) - 1, so this is an
     * implementation imposed restriction, not an architectural one,
     * so we don't have to deal with bitfields where only some of the
     * bits in a 32-bit word should be valid.
     */
    if (s->num_irq % 32) {
        error_setg(errp,
                   "%d interrupt lines unsupported: not divisible by 32",
                   s->num_irq);
        return;
    }

    if (s->lpi_enable && !s->dma) {
        error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set");
        return;
    }

    rdist_capacity = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        rdist_capacity += s->redist_region_count[i];
    }
    if (rdist_capacity != s->num_cpu) {
        error_setg(errp, "Capacity of the redist regions(%d) "
                   "does not match the number of vcpus(%d)",
                   rdist_capacity, s->num_cpu);
        return;
    }

    if (s->lpi_enable) {
        address_space_init(&s->dma_as, s->dma, "gicv3-dist-dma");
    }

    s->cpu = g_new0(GICv3CPUState, s->num_cpu);

    for (i = 0; i < s->num_cpu; i++) {
        CPUState *cpu = qemu_get_cpu(i);
        uint64_t cpu_affid;

        s->cpu[i].cpu = cpu;
        s->cpu[i].gic = s;
        /* Store GICv3CPUState in CPUARMState gicv3state pointer */
        gicv3_set_gicv3state(cpu, &s->cpu[i]);

        /* Pre-construct the GICR_TYPER:
         * For our implementation:
         *  Top 32 bits are the affinity value of the associated CPU
         *  CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
         *  Processor_Number == CPU index starting from 0
         *  DPGS == 0 (GICR_CTLR.DPG* not supported)
         *  Last == 1 if this is the last redistributor in a series of
         *            contiguous redistributor pages
         *  DirectLPI == 0 (direct injection of LPIs not supported)
         *  VLPIS == 1 if vLPIs supported (GICv4 and up)
         *  PLPIS == 1 if LPIs supported
         */
        cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);

        /* The CPU mp-affinity property is in MPIDR register format; squash
         * the affinity bytes into 32 bits as the GICR_TYPER has them.
         */
        cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
                     (cpu_affid & 0xFFFFFF);
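
        /*
         * For example, an MPIDR-format value with Aff3 = 0xAA, Aff2 = 0xBB,
         * Aff1 = 0xCC, Aff0 = 0xDD arrives as 0x000000AA00BBCCDD and is
         * squashed to 0xAABBCCDD, ready to be placed in GICR_TYPER[63:32].
         */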
        s->cpu[i].gicr_typer = (cpu_affid << 32) |
            (1 << 24) |
            (i << 8);

        if (s->lpi_enable) {
            s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
            if (s->revision > 3) {
                s->cpu[i].gicr_typer |= GICR_TYPER_VLPIS;
            }
        }
    }

    /*
     * Now go through and set GICR_TYPER.Last for the final
     * redistributor in each region.
     */
    cpuidx = 0;
    for (i = 0; i < s->nb_redist_regions; i++) {
        cpuidx += s->redist_region_count[i];
        s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST;
    }

    s->itslist = g_ptr_array_new();
}
static void arm_gicv3_finalize(Object *obj)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    g_free(s->redist_region_count);
}
static void arm_gicv3_common_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        cs->level = 0;
        cs->gicr_ctlr = 0;
        if (s->lpi_enable) {
            /* Our implementation supports clearing GICR_CTLR.EnableLPIs */
            cs->gicr_ctlr |= GICR_CTLR_CES;
        }
        cs->gicr_statusr[GICV3_S] = 0;
        cs->gicr_statusr[GICV3_NS] = 0;
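        /*
         * Out of reset the redistributor reports itself (and its attached
         * CPU interface) as asleep; guest software is expected to clear
         * GICR_WAKER.ProcessorSleep and then poll ChildrenAsleep before
         * using the redistributor.
         */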
        cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
        cs->gicr_propbaser = 0;
        cs->gicr_pendbaser = 0;
        cs->gicr_vpropbaser = 0;
        cs->gicr_vpendbaser = 0;
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        if (s->irq_reset_nonsecure) {
            cs->gicr_igroupr0 = 0xffffffff;
        } else {
            cs->gicr_igroupr0 = 0;
        }

        cs->gicr_ienabler0 = 0;
        cs->gicr_ipendr0 = 0;
        cs->gicr_iactiver0 = 0;
        cs->edge_trigger = 0xffff;
        cs->gicr_igrpmodr0 = 0;
        cs->gicr_nsacr = 0;
        memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));

        cs->hppi.prio = 0xff;
        cs->hpplpi.prio = 0xff;
        cs->hppvlpi.prio = 0xff;

        /* State in the CPU interface must *not* be reset here, because it
         * is part of the CPU's reset domain, not the GIC device's.
         */
    }

    /* For our implementation affinity routing is always enabled */
    if (s->security_extn) {
        s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
    } else {
        s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
    }

    s->gicd_statusr[GICV3_S] = 0;
    s->gicd_statusr[GICV3_NS] = 0;

    memset(s->group, 0, sizeof(s->group));
    memset(s->grpmod, 0, sizeof(s->grpmod));
    memset(s->enabled, 0, sizeof(s->enabled));
    memset(s->pending, 0, sizeof(s->pending));
    memset(s->active, 0, sizeof(s->active));
    memset(s->level, 0, sizeof(s->level));
    memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
    memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
    memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
    memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
    /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
     * write these to get sane behaviour and we need not populate the
     * pointer cache here; however having the cache be different for
     * "happened to be 0 from reset" and "guest wrote 0" would be
     * confusing.
     */
    gicv3_cache_all_target_cpustates(s);

    if (s->irq_reset_nonsecure) {
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        for (i = GIC_INTERNAL; i < s->num_irq; i++) {
            gicv3_gicd_group_set(s, i);
        }
    }
    s->gicd_no_migration_shift_bug = true;
}
static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
                                      bool secure_boot)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    if (s->security_extn && !secure_boot) {
        /* We're directly booting a kernel into NonSecure. If this GIC
         * implements the security extensions then we must configure it
         * to have all the interrupts be NonSecure (this is a job that
         * is done by the Secure boot firmware in real hardware, and in
         * this mode QEMU is acting as a minimalist firmware-and-bootloader
         * equivalent).
         */
        s->irq_reset_nonsecure = true;
    }
}
static Property arm_gicv3_common_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
    DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
    DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
    DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
    /*
     * Compatibility property: force 8 bits of physical priority, even
     * if the CPU being emulated should have fewer.
     */
    DEFINE_PROP_BOOL("force-8-bit-prio", GICv3State, force_8bit_prio, 0),
    DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                      redist_region_count, qdev_prop_uint32, uint32_t),
    DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
                     MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
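
/*
 * A board model would typically configure these properties on one of the
 * concrete subclasses before realizing it, along the lines of (illustrative
 * sketch only; the type name, values and address below are examples, not
 * taken from this file):
 *
 *   DeviceState *gicdev = qdev_new("arm-gicv3");
 *   qdev_prop_set_uint32(gicdev, "num-cpu", smp_cpus);
 *   qdev_prop_set_uint32(gicdev, "num-irq", 32 + 224);
 *   qdev_prop_set_bit(gicdev, "has-security-extensions", true);
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(gicdev), &error_fatal);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(gicdev), 0, 0x08000000);
 *
 * A real board must also populate the "redist-region-count" array property
 * so that the redistributor capacity matches num-cpu, as checked in
 * arm_gicv3_common_realize() above.
 */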
static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);

    dc->reset = arm_gicv3_common_reset;
    dc->realize = arm_gicv3_common_realize;
    device_class_set_props(dc, arm_gicv3_common_properties);
    dc->vmsd = &vmstate_gicv3;
    albifc->arm_linux_init = arm_gic_common_linux_init;
}
static const TypeInfo arm_gicv3_common_type = {
    .name = TYPE_ARM_GICV3_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(GICv3State),
    .class_size = sizeof(ARMGICv3CommonClass),
    .class_init = arm_gicv3_common_class_init,
    .instance_finalize = arm_gicv3_finalize,
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_ARM_LINUX_BOOT_IF },
        { },
    },
};
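
/*
 * TYPE_ARM_GICV3_COMMON is an abstract base: the emulated GIC and the
 * in-kernel KVM GIC register their own concrete subclasses of it, which
 * supply the MMIO handlers and the pre_save/post_load hooks invoked above.
 */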
static void register_types(void)
{
    type_register_static(&arm_gicv3_common_type);
}

type_init(register_types)