/*
 * ARM mach-virt emulation
 *
 * Copyright (c) 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Emulate a virtual board which works by passing Linux all the information
 * it needs about what devices are present via the device tree.
 * There are some restrictions about what we can do here:
 *  + we can only present devices whose Linux drivers will work based
 *    purely on the device tree with no platform data at all
 *  + we want to present a very stripped-down minimalist platform,
 *    both because this reduces the security attack surface from the guest
 *    and also because it reduces our exposure to being broken when
 *    the kernel updates its device tree bindings and requires further
 *    information in a device binding that we aren't providing.
 * This is essentially the same approach kvmtool uses.
 */
#include "qemu/osdep.h"
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "qemu/option.h"
#include "monitor/qdev.h"
#include "hw/sysbus.h"
#include "hw/arm/boot.h"
#include "hw/arm/primecell.h"
#include "hw/arm/virt.h"
#include "hw/block/flash.h"
#include "hw/vfio/vfio-calxeda-xgmac.h"
#include "hw/vfio/vfio-amd-xgbe.h"
#include "hw/display/ramfb.h"
#include "sysemu/device_tree.h"
#include "sysemu/numa.h"
#include "sysemu/runstate.h"
#include "sysemu/tpm.h"
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/hvf.h"
#include "sysemu/qtest.h"
#include "hw/loader.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci-host/gpex.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/core/sysbus-fdt.h"
#include "hw/platform-bus.h"
#include "hw/qdev-properties.h"
#include "hw/arm/fdt.h"
#include "hw/intc/arm_gic.h"
#include "hw/intc/arm_gicv3_common.h"
#include "hw/firmware/smbios.h"
#include "qapi/visitor.h"
#include "qapi/qapi-visit-common.h"
#include "standard-headers/linux/input.h"
#include "hw/arm/smmuv3.h"
#include "hw/acpi/acpi.h"
#include "target/arm/internals.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/virtio/virtio-mem-pci.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/char/pl011.h"
#include "qemu/guest-random.h"
#define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \
    static void virt_##major##_##minor##_class_init(ObjectClass *oc, \
                                                    void *data) \
    { \
        MachineClass *mc = MACHINE_CLASS(oc); \
        virt_machine_##major##_##minor##_options(mc); \
        mc->desc = "QEMU " # major "." # minor " ARM Virtual Machine"; \
        if (latest) { \
            mc->alias = "virt"; \
        } \
    } \
    static const TypeInfo machvirt_##major##_##minor##_info = { \
        .name = MACHINE_TYPE_NAME("virt-" # major "." # minor), \
        .parent = TYPE_VIRT_MACHINE, \
        .class_init = virt_##major##_##minor##_class_init, \
    }; \
    static void machvirt_machine_##major##_##minor##_init(void) \
    { \
        type_register_static(&machvirt_##major##_##minor##_info); \
    } \
    type_init(machvirt_machine_##major##_##minor##_init);

#define DEFINE_VIRT_MACHINE_AS_LATEST(major, minor) \
    DEFINE_VIRT_MACHINE_LATEST(major, minor, true)
#define DEFINE_VIRT_MACHINE(major, minor) \
    DEFINE_VIRT_MACHINE_LATEST(major, minor, false)
/* Number of external interrupt lines to configure the GIC with */
#define NUM_IRQS 256

#define PLATFORM_BUS_NUM_IRQS 64

/* Legacy RAM limit in GB (< version 4.0) */
#define LEGACY_RAMLIMIT_GB 255
#define LEGACY_RAMLIMIT_BYTES (LEGACY_RAMLIMIT_GB * GiB)
/* Addresses and sizes of our components.
 * 0..128MB is space for a flash device so we can run bootrom code such as UEFI.
 * 128MB..256MB is used for miscellaneous device I/O.
 * 256MB..1GB is reserved for possible future PCI support (ie where the
 * PCI memory window will go if we add a PCI host controller).
 * 1GB and up is RAM (which may happily spill over into the
 * high memory region beyond 4GB).
 * This represents a compromise between how much RAM can be given to
 * a 32 bit VM and leaving space for expansion and in particular for PCI.
 * Note that devices should generally be placed at multiples of 0x10000,
 * to accommodate guests using 64K pages.
 */
static const MemMapEntry base_memmap[] = {
    /* Space up to 0x8000000 is reserved for a boot ROM */
    [VIRT_FLASH] =              {          0, 0x08000000 },
    [VIRT_CPUPERIPHS] =         { 0x08000000, 0x00020000 },
    /* GIC distributor and CPU interfaces sit inside the CPU peripheral space */
    [VIRT_GIC_DIST] =           { 0x08000000, 0x00010000 },
    [VIRT_GIC_CPU] =            { 0x08010000, 0x00010000 },
    [VIRT_GIC_V2M] =            { 0x08020000, 0x00001000 },
    [VIRT_GIC_HYP] =            { 0x08030000, 0x00010000 },
    [VIRT_GIC_VCPU] =           { 0x08040000, 0x00010000 },
    /* The space in between here is reserved for GICv3 CPU/vCPU/HYP */
    [VIRT_GIC_ITS] =            { 0x08080000, 0x00020000 },
    /* This redistributor space allows up to 2*64kB*123 CPUs */
    [VIRT_GIC_REDIST] =         { 0x080A0000, 0x00F60000 },
    [VIRT_UART] =               { 0x09000000, 0x00001000 },
    [VIRT_RTC] =                { 0x09010000, 0x00001000 },
    [VIRT_FW_CFG] =             { 0x09020000, 0x00000018 },
    [VIRT_GPIO] =               { 0x09030000, 0x00001000 },
    [VIRT_SECURE_UART] =        { 0x09040000, 0x00001000 },
    [VIRT_SMMU] =               { 0x09050000, 0x00020000 },
    [VIRT_PCDIMM_ACPI] =        { 0x09070000, MEMORY_HOTPLUG_IO_LEN },
    [VIRT_ACPI_GED] =           { 0x09080000, ACPI_GED_EVT_SEL_LEN },
    [VIRT_NVDIMM_ACPI] =        { 0x09090000, NVDIMM_ACPI_IO_LEN },
    [VIRT_PVTIME] =             { 0x090a0000, 0x00010000 },
    [VIRT_SECURE_GPIO] =        { 0x090b0000, 0x00001000 },
    [VIRT_MMIO] =               { 0x0a000000, 0x00000200 },
    /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
    [VIRT_PLATFORM_BUS] =       { 0x0c000000, 0x02000000 },
    [VIRT_SECURE_MEM] =         { 0x0e000000, 0x01000000 },
    [VIRT_PCIE_MMIO] =          { 0x10000000, 0x2eff0000 },
    [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
    [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
    /* Actual RAM size depends on initial RAM and device memory settings */
    [VIRT_MEM] =                { GiB, LEGACY_RAMLIMIT_BYTES },
};
/*
 * Highmem IO Regions: This memory map is floating, located after the RAM.
 * Each MemMapEntry base (GPA) will be dynamically computed, depending on the
 * top of the RAM, so that its base get the same alignment as the size,
 * ie. a 512GiB entry will be aligned on a 512GiB boundary. If there is
 * less than 256GiB of RAM, the floating area starts at the 256GiB mark.
 * Note the extended_memmap is sized so that it eventually also includes the
 * base_memmap entries (VIRT_HIGH_GIC_REDIST2 index is greater than the last
 * index of base_memmap).
 *
 * The memory map for these Highmem IO Regions can be in legacy or compact
 * layout, depending on 'compact-highmem' property. With legacy layout, the
 * PA space for one specific region is always reserved, even if the region
 * has been disabled or doesn't fit into the PA space. However, the PA space
 * for the region won't be reserved in these circumstances with compact layout.
 */
static MemMapEntry extended_memmap[] = {
    /* Additional 64 MB redist region (can contain up to 512 redistributors) */
    [VIRT_HIGH_GIC_REDIST2] =   { 0x0, 64 * MiB },
    [VIRT_HIGH_PCIE_ECAM] =     { 0x0, 256 * MiB },
    /* Second PCIe window */
    [VIRT_HIGH_PCIE_MMIO] =     { 0x0, 512 * GiB },
};
static const int a15irqmap[] = {
    [VIRT_UART] = 1,
    [VIRT_RTC] = 2,
    [VIRT_PCIE] = 3, /* ... to 6 */
    [VIRT_GPIO] = 7,
    [VIRT_SECURE_UART] = 8,
    [VIRT_ACPI_GED] = 9,
    [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
    [VIRT_GIC_V2M] = 48, /* ...to 48 + NUM_GICV2M_SPIS - 1 */
    [VIRT_SMMU] = 74,    /* ...to 74 + NUM_SMMU_IRQS - 1 */
    [VIRT_PLATFORM_BUS] = 112, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */
};
static const char *valid_cpus[] = {
    ARM_CPU_TYPE_NAME("cortex-a7"),
    ARM_CPU_TYPE_NAME("cortex-a15"),
    ARM_CPU_TYPE_NAME("cortex-a35"),
    ARM_CPU_TYPE_NAME("cortex-a53"),
    ARM_CPU_TYPE_NAME("cortex-a55"),
    ARM_CPU_TYPE_NAME("cortex-a57"),
    ARM_CPU_TYPE_NAME("cortex-a72"),
    ARM_CPU_TYPE_NAME("cortex-a76"),
    ARM_CPU_TYPE_NAME("a64fx"),
    ARM_CPU_TYPE_NAME("neoverse-n1"),
    ARM_CPU_TYPE_NAME("host"),
    ARM_CPU_TYPE_NAME("max"),
};
static bool cpu_type_valid(const char *cpu)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) {
        if (strcmp(cpu, valid_cpus[i]) == 0) {
            return true;
        }
    }
    return false;
}
static void create_randomness(MachineState *ms, const char *node)
{
    struct {
        uint64_t kaslr;
        uint8_t rng[32];
    } seed;

    if (qemu_guest_getrandom(&seed, sizeof(seed), NULL)) {
        return;
    }
    qemu_fdt_setprop_u64(ms->fdt, node, "kaslr-seed", seed.kaslr);
    qemu_fdt_setprop(ms->fdt, node, "rng-seed", seed.rng, sizeof(seed.rng));
}
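/*
 * Build the skeleton device tree for the machine: the root compatible and
 * model properties, the /chosen node (plus /secure-chosen when the secure
 * world is present), the fixed 24MHz APB clock referenced by the PL011, and
 * a NUMA distance map when distances have been configured.
 */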
static void create_fdt(VirtMachineState *vms)
{
    MachineState *ms = MACHINE(vms);
    int nb_numa_nodes = ms->numa_state->num_nodes;
    void *fdt = create_device_tree(&vms->fdt_size);

    if (!fdt) {
        error_report("create_device_tree() failed");
        exit(1);
    }

    ms->fdt = fdt;

    qemu_fdt_setprop_string(fdt, "/", "compatible", "linux,dummy-virt");
    qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2);
    qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2);
    qemu_fdt_setprop_string(fdt, "/", "model", "linux,dummy-virt");

    /* /chosen must exist for load_dtb to fill in necessary properties later */
    qemu_fdt_add_subnode(fdt, "/chosen");
    if (vms->dtb_randomness) {
        create_randomness(ms, "/chosen");
    }

    if (vms->secure) {
        qemu_fdt_add_subnode(fdt, "/secure-chosen");
        if (vms->dtb_randomness) {
            create_randomness(ms, "/secure-chosen");
        }
    }

    /* Clock node, for the benefit of the UART. The kernel device tree
     * binding documentation claims the PL011 node clock properties are
     * optional but in practice if you omit them the kernel refuses to
     * probe for the device.
     */
    vms->clock_phandle = qemu_fdt_alloc_phandle(fdt);
    qemu_fdt_add_subnode(fdt, "/apb-pclk");
    qemu_fdt_setprop_string(fdt, "/apb-pclk", "compatible", "fixed-clock");
    qemu_fdt_setprop_cell(fdt, "/apb-pclk", "#clock-cells", 0x0);
    qemu_fdt_setprop_cell(fdt, "/apb-pclk", "clock-frequency", 24000000);
    qemu_fdt_setprop_string(fdt, "/apb-pclk", "clock-output-names",
                            "clk24mhz");
    qemu_fdt_setprop_cell(fdt, "/apb-pclk", "phandle", vms->clock_phandle);

    if (nb_numa_nodes > 0 && ms->numa_state->have_numa_distance) {
        int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
        uint32_t *matrix = g_malloc0(size);
        int idx, i, j;

        for (i = 0; i < nb_numa_nodes; i++) {
            for (j = 0; j < nb_numa_nodes; j++) {
                idx = (i * nb_numa_nodes + j) * 3;
                matrix[idx + 0] = cpu_to_be32(i);
                matrix[idx + 1] = cpu_to_be32(j);
                matrix[idx + 2] =
                    cpu_to_be32(ms->numa_state->nodes[i].distance[j]);
            }
        }

        qemu_fdt_add_subnode(fdt, "/distance-map");
        qemu_fdt_setprop_string(fdt, "/distance-map", "compatible",
                                "numa-distance-map-v1");
        qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix",
                         matrix, size);
        g_free(matrix);
    }
}
static void fdt_add_timer_nodes(const VirtMachineState *vms)
{
    /* On real hardware these interrupts are level-triggered.
     * On KVM they were edge-triggered before host kernel version 4.4,
     * and level-triggered afterwards.
     * On emulated QEMU they are level-triggered.
     *
     * Getting the DTB info about them wrong is awkward for some
     * guest kernels:
     *  pre-4.8 ignore the DT and leave the interrupt configured
     *   with whatever the GIC reset value (or the bootloader) left it at
     *  4.8 before rc6 honour the incorrect data by programming it back
     *   into the GIC, causing problems
     *  4.8rc6 and later ignore the DT and always write "level triggered"
     *   into the GIC
     *
     * For backwards-compatibility, virt-2.8 and earlier will continue
     * to say these are edge-triggered, but later machines will report
     * the correct information.
     */
    ARMCPU *armcpu;
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
    MachineState *ms = MACHINE(vms);

    if (vmc->claim_edge_triggered_timers) {
        irqflags = GIC_FDT_IRQ_FLAGS_EDGE_LO_HI;
    }

    if (vms->gic_version == VIRT_GIC_VERSION_2) {
        irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
                             GIC_FDT_IRQ_PPI_CPU_WIDTH,
                             (1 << MACHINE(vms)->smp.cpus) - 1);
    }

    qemu_fdt_add_subnode(ms->fdt, "/timer");

    armcpu = ARM_CPU(qemu_get_cpu(0));
    if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) {
        const char compat[] = "arm,armv8-timer\0arm,armv7-timer";
        qemu_fdt_setprop(ms->fdt, "/timer", "compatible",
                         compat, sizeof(compat));
    } else {
        qemu_fdt_setprop_string(ms->fdt, "/timer", "compatible",
                                "arm,armv7-timer");
    }
    qemu_fdt_setprop(ms->fdt, "/timer", "always-on", NULL, 0);
    qemu_fdt_setprop_cells(ms->fdt, "/timer", "interrupts",
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_S_EL1_IRQ, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL1_IRQ, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_VIRT_IRQ, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL2_IRQ, irqflags);
}
static void fdt_add_cpu_nodes(const VirtMachineState *vms)
{
    int cpu;
    int addr_cells = 1;
    const MachineState *ms = MACHINE(vms);
    const VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int smp_cpus = ms->smp.cpus;

    /*
     * See Linux Documentation/devicetree/bindings/arm/cpus.yaml
     * On ARM v8 64-bit systems value should be set to 2,
     * that corresponds to the MPIDR_EL1 register size.
     * If MPIDR_EL1[63:32] value is equal to 0 on all CPUs
     * in the system, #address-cells can be set to 1, since
     * MPIDR_EL1[63:32] bits are not used for CPUs
     * identification.
     *
     * Here we actually don't know whether our system is 32- or 64-bit one.
     * The simplest way to go is to examine affinity IDs of all our CPUs. If
     * at least one of them has Aff3 populated, we set #address-cells to 2.
     */
    for (cpu = 0; cpu < smp_cpus; cpu++) {
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));

        if (armcpu->mp_affinity & ARM_AFF3_MASK) {
            addr_cells = 2;
            break;
        }
    }

    qemu_fdt_add_subnode(ms->fdt, "/cpus");
    qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", addr_cells);
    qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);

    for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
        char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
        CPUState *cs = CPU(armcpu);

        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu");
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                                armcpu->dtb_compatible);

        if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED && smp_cpus > 1) {
            qemu_fdt_setprop_string(ms->fdt, nodename,
                                    "enable-method", "psci");
        }

        if (addr_cells == 2) {
            qemu_fdt_setprop_u64(ms->fdt, nodename, "reg",
                                 armcpu->mp_affinity);
        } else {
            qemu_fdt_setprop_cell(ms->fdt, nodename, "reg",
                                  armcpu->mp_affinity);
        }

        if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
            qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id",
                ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
        }

        if (!vmc->no_cpu_topology) {
            qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
                                  qemu_fdt_alloc_phandle(ms->fdt));
        }

        g_free(nodename);
    }

    if (!vmc->no_cpu_topology) {
        /*
         * Add vCPU topology description through fdt node cpu-map.
         *
         * See Linux Documentation/devicetree/bindings/cpu/cpu-topology.txt
         * In a SMP system, the hierarchy of CPUs can be defined through
         * four entities that are used to describe the layout of CPUs in
         * the system: socket/cluster/core/thread.
         *
         * A socket node represents the boundary of system physical package
         * and its child nodes must be one or more cluster nodes. A system
         * can contain several layers of clustering within a single physical
         * package and cluster nodes can be contained in parent cluster nodes.
         *
         * Note: currently we only support one layer of clustering within
         * each physical package.
         */
        qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");

        for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
            char *cpu_path = g_strdup_printf("/cpus/cpu@%d", cpu);
            char *map_path;

            if (ms->smp.threads > 1) {
                map_path = g_strdup_printf(
                    "/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d",
                    cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads),
                    (cpu / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters,
                    (cpu / ms->smp.threads) % ms->smp.cores,
                    cpu % ms->smp.threads);
            } else {
                map_path = g_strdup_printf(
                    "/cpus/cpu-map/socket%d/cluster%d/core%d",
                    cpu / (ms->smp.clusters * ms->smp.cores),
                    (cpu / ms->smp.cores) % ms->smp.clusters,
                    cpu % ms->smp.cores);
            }
            qemu_fdt_add_path(ms->fdt, map_path);
            qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path);

            g_free(map_path);
            g_free(cpu_path);
        }
    }
}
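/* Add the GICv3 ITS node (an MSI controller) under /intc in the device tree. */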
static void fdt_add_its_gic_node(VirtMachineState *vms)
{
    char *nodename;
    MachineState *ms = MACHINE(vms);

    vms->msi_phandle = qemu_fdt_alloc_phandle(ms->fdt);
    nodename = g_strdup_printf("/intc/its@%" PRIx64,
                               vms->memmap[VIRT_GIC_ITS].base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                            "arm,gic-v3-its");
    qemu_fdt_setprop(ms->fdt, nodename, "msi-controller", NULL, 0);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#msi-cells", 1);
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, vms->memmap[VIRT_GIC_ITS].base,
                                 2, vms->memmap[VIRT_GIC_ITS].size);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", vms->msi_phandle);
    g_free(nodename);
}
static void fdt_add_v2m_gic_node(VirtMachineState *vms)
{
    MachineState *ms = MACHINE(vms);
    char *nodename;

    nodename = g_strdup_printf("/intc/v2m@%" PRIx64,
                               vms->memmap[VIRT_GIC_V2M].base);
    vms->msi_phandle = qemu_fdt_alloc_phandle(ms->fdt);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                            "arm,gic-v2m-frame");
    qemu_fdt_setprop(ms->fdt, nodename, "msi-controller", NULL, 0);
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, vms->memmap[VIRT_GIC_V2M].base,
                                 2, vms->memmap[VIRT_GIC_V2M].size);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", vms->msi_phandle);
    g_free(nodename);
}
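/*
 * Add the /intc node describing the interrupt controller: the compatible
 * string for the configured GIC version, the distributor and redistributor
 * (or CPU interface) regions, and the maintenance interrupt when
 * virtualization is enabled.
 */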
static void fdt_add_gic_node(VirtMachineState *vms)
{
    MachineState *ms = MACHINE(vms);
    char *nodename;

    vms->gic_phandle = qemu_fdt_alloc_phandle(ms->fdt);
    qemu_fdt_setprop_cell(ms->fdt, "/", "interrupt-parent", vms->gic_phandle);

    nodename = g_strdup_printf("/intc@%" PRIx64,
                               vms->memmap[VIRT_GIC_DIST].base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 3);
    qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 0x2);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 0x2);
    qemu_fdt_setprop(ms->fdt, nodename, "ranges", NULL, 0);
    if (vms->gic_version != VIRT_GIC_VERSION_2) {
        int nb_redist_regions = virt_gicv3_redist_region_count(vms);

        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                                "arm,gic-v3");
        qemu_fdt_setprop_cell(ms->fdt, nodename,
                              "#redistributor-regions", nb_redist_regions);

        if (nb_redist_regions == 1) {
            qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                         2, vms->memmap[VIRT_GIC_DIST].base,
                                         2, vms->memmap[VIRT_GIC_DIST].size,
                                         2, vms->memmap[VIRT_GIC_REDIST].base,
                                         2, vms->memmap[VIRT_GIC_REDIST].size);
        } else {
            qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, vms->memmap[VIRT_GIC_DIST].base,
                                 2, vms->memmap[VIRT_GIC_DIST].size,
                                 2, vms->memmap[VIRT_GIC_REDIST].base,
                                 2, vms->memmap[VIRT_GIC_REDIST].size,
                                 2, vms->memmap[VIRT_HIGH_GIC_REDIST2].base,
                                 2, vms->memmap[VIRT_HIGH_GIC_REDIST2].size);
        }

        if (vms->virt) {
            qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                                   GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
                                   GIC_FDT_IRQ_FLAGS_LEVEL_HI);
        }
    } else {
        /* 'cortex-a15-gic' means 'GIC v2' */
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                                "arm,cortex-a15-gic");
        if (!vms->virt) {
            qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                         2, vms->memmap[VIRT_GIC_DIST].base,
                                         2, vms->memmap[VIRT_GIC_DIST].size,
                                         2, vms->memmap[VIRT_GIC_CPU].base,
                                         2, vms->memmap[VIRT_GIC_CPU].size);
        } else {
            qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                         2, vms->memmap[VIRT_GIC_DIST].base,
                                         2, vms->memmap[VIRT_GIC_DIST].size,
                                         2, vms->memmap[VIRT_GIC_CPU].base,
                                         2, vms->memmap[VIRT_GIC_CPU].size,
                                         2, vms->memmap[VIRT_GIC_HYP].base,
                                         2, vms->memmap[VIRT_GIC_HYP].size,
                                         2, vms->memmap[VIRT_GIC_VCPU].base,
                                         2, vms->memmap[VIRT_GIC_VCPU].size);
            qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                                   GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
                                   GIC_FDT_IRQ_FLAGS_LEVEL_HI);
        }
    }

    qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", vms->gic_phandle);
    g_free(nodename);
}
static void fdt_add_pmu_nodes(const VirtMachineState *vms)
{
    ARMCPU *armcpu = ARM_CPU(first_cpu);
    uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
    MachineState *ms = MACHINE(vms);

    if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
        assert(!object_property_get_bool(OBJECT(armcpu), "pmu", NULL));
        return;
    }

    if (vms->gic_version == VIRT_GIC_VERSION_2) {
        irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
                             GIC_FDT_IRQ_PPI_CPU_WIDTH,
                             (1 << MACHINE(vms)->smp.cpus) - 1);
    }

    qemu_fdt_add_subnode(ms->fdt, "/pmu");
    if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) {
        const char compat[] = "arm,armv8-pmuv3";
        qemu_fdt_setprop(ms->fdt, "/pmu", "compatible",
                         compat, sizeof(compat));
        qemu_fdt_setprop_cells(ms->fdt, "/pmu", "interrupts",
                               GIC_FDT_IRQ_TYPE_PPI, VIRTUAL_PMU_IRQ, irqflags);
    }
}
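/*
 * Create the ACPI Generic Event Device used to signal power-down and,
 * when enabled, memory and NVDIMM hotplug events to the guest.
 */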
static inline DeviceState *create_acpi_ged(VirtMachineState *vms)
{
    DeviceState *dev;
    MachineState *ms = MACHINE(vms);
    int irq = vms->irqmap[VIRT_ACPI_GED];
    uint32_t event = ACPI_GED_PWR_DOWN_EVT;

    if (ms->ram_slots) {
        event |= ACPI_GED_MEM_HOTPLUG_EVT;
    }

    if (ms->nvdimms_state->is_enabled) {
        event |= ACPI_GED_NVDIMM_HOTPLUG_EVT;
    }

    dev = qdev_new(TYPE_ACPI_GED);
    qdev_prop_set_uint32(dev, "ged-event", event);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_ACPI_GED].base);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, vms->memmap[VIRT_PCDIMM_ACPI].base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(vms->gic, irq));

    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    return dev;
}
static void create_its(VirtMachineState *vms)
{
    const char *itsclass = its_class_name();
    DeviceState *dev;

    if (!strcmp(itsclass, "arm-gicv3-its")) {
        if (!vms->tcg_its) {
            itsclass = NULL;
        }
    }

    if (!itsclass) {
        /* Do nothing if not supported */
        return;
    }

    dev = qdev_new(itsclass);

    object_property_set_link(OBJECT(dev), "parent-gicv3", OBJECT(vms->gic),
                             &error_abort);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_ITS].base);

    fdt_add_its_gic_node(vms);
    vms->msi_controller = VIRT_MSI_CTRL_ITS;
}
static void create_v2m(VirtMachineState *vms)
{
    int i;
    int irq = vms->irqmap[VIRT_GIC_V2M];
    DeviceState *dev;

    dev = qdev_new("arm-gicv2m");
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_V2M].base);
    qdev_prop_set_uint32(dev, "base-spi", irq);
    qdev_prop_set_uint32(dev, "num-spi", NUM_GICV2M_SPIS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    for (i = 0; i < NUM_GICV2M_SPIS; i++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
                           qdev_get_gpio_in(vms->gic, irq + i));
    }

    fdt_add_v2m_gic_node(vms);
    vms->msi_controller = VIRT_MSI_CTRL_GICV2M;
}
static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
{
    MachineState *ms = MACHINE(vms);
    /* We create a standalone GIC */
    SysBusDevice *gicbusdev;
    const char *gictype;
    int i, revision;
    unsigned int smp_cpus = ms->smp.cpus;
    uint32_t nb_redist_regions = 0;

    if (vms->gic_version == VIRT_GIC_VERSION_2) {
        gictype = gic_class_name();
    } else {
        gictype = gicv3_class_name();
    }

    switch (vms->gic_version) {
    case VIRT_GIC_VERSION_2:
        revision = 2;
        break;
    case VIRT_GIC_VERSION_3:
        revision = 3;
        break;
    case VIRT_GIC_VERSION_4:
        revision = 4;
        break;
    default:
        g_assert_not_reached();
    }
    vms->gic = qdev_new(gictype);
    qdev_prop_set_uint32(vms->gic, "revision", revision);
    qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus);
    /* Note that the num-irq property counts both internal and external
     * interrupts; there are always 32 of the former (mandated by GIC spec).
     */
    qdev_prop_set_uint32(vms->gic, "num-irq", NUM_IRQS + 32);
    if (!kvm_irqchip_in_kernel()) {
        qdev_prop_set_bit(vms->gic, "has-security-extensions", vms->secure);
    }

    if (vms->gic_version != VIRT_GIC_VERSION_2) {
        uint32_t redist0_capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST);
        uint32_t redist0_count = MIN(smp_cpus, redist0_capacity);

        nb_redist_regions = virt_gicv3_redist_region_count(vms);

        qdev_prop_set_uint32(vms->gic, "len-redist-region-count",
                             nb_redist_regions);
        qdev_prop_set_uint32(vms->gic, "redist-region-count[0]", redist0_count);

        if (!kvm_irqchip_in_kernel()) {
            if (vms->tcg_its) {
                object_property_set_link(OBJECT(vms->gic), "sysmem",
                                         OBJECT(mem), &error_fatal);
                qdev_prop_set_bit(vms->gic, "has-lpi", true);
            }
        }

        if (nb_redist_regions == 2) {
            uint32_t redist1_capacity =
                virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2);

            qdev_prop_set_uint32(vms->gic, "redist-region-count[1]",
                                 MIN(smp_cpus - redist0_count,
                                     redist1_capacity));
        }
    } else {
        if (!kvm_irqchip_in_kernel()) {
            qdev_prop_set_bit(vms->gic, "has-virtualization-extensions",
                              vms->virt);
        }
    }
    gicbusdev = SYS_BUS_DEVICE(vms->gic);
    sysbus_realize_and_unref(gicbusdev, &error_fatal);
    sysbus_mmio_map(gicbusdev, 0, vms->memmap[VIRT_GIC_DIST].base);
    if (vms->gic_version != VIRT_GIC_VERSION_2) {
        sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base);
        if (nb_redist_regions == 2) {
            sysbus_mmio_map(gicbusdev, 2,
                            vms->memmap[VIRT_HIGH_GIC_REDIST2].base);
        }
    } else {
        sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
        if (vms->virt) {
            sysbus_mmio_map(gicbusdev, 2, vms->memmap[VIRT_GIC_HYP].base);
            sysbus_mmio_map(gicbusdev, 3, vms->memmap[VIRT_GIC_VCPU].base);
        }
    }

    /* Wire the outputs from each CPU's generic timer and the GICv3
     * maintenance interrupt signal to the appropriate GIC PPI inputs,
     * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs.
     */
    for (i = 0; i < smp_cpus; i++) {
        DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
        int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
        int irq;
        /* Mapping from the output timer irq lines from the CPU to the
         * GIC PPI inputs we use for the virt board.
         */
        const int timer_irq[] = {
            [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ,
            [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ,
            [GTIMER_HYP]  = ARCH_TIMER_NS_EL2_IRQ,
            [GTIMER_SEC]  = ARCH_TIMER_S_EL1_IRQ,
        };

        for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
            qdev_connect_gpio_out(cpudev, irq,
                                  qdev_get_gpio_in(vms->gic,
                                                   ppibase + timer_irq[irq]));
        }

        if (vms->gic_version != VIRT_GIC_VERSION_2) {
            qemu_irq irq = qdev_get_gpio_in(vms->gic,
                                            ppibase + ARCH_GIC_MAINT_IRQ);
            qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
                                        0, irq);
        } else if (vms->virt) {
            qemu_irq irq = qdev_get_gpio_in(vms->gic,
                                            ppibase + ARCH_GIC_MAINT_IRQ);
            sysbus_connect_irq(gicbusdev, i + 4 * smp_cpus, irq);
        }

        qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
                                    qdev_get_gpio_in(vms->gic, ppibase
                                                     + VIRTUAL_PMU_IRQ));

        sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
        sysbus_connect_irq(gicbusdev, i + smp_cpus,
                           qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
        sysbus_connect_irq(gicbusdev, i + 2 * smp_cpus,
                           qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
        sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus,
                           qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
    }

    fdt_add_gic_node(vms);

    if (vms->gic_version != VIRT_GIC_VERSION_2 && vms->its) {
        create_its(vms);
    } else if (vms->gic_version == VIRT_GIC_VERSION_2) {
        create_v2m(vms);
    }
}
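/*
 * Instantiate a PL011 UART, map it into the given memory space, wire its
 * SPI, and describe it in the device tree; the secure UART variant is
 * additionally marked as unavailable to the normal world.
 */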
static void create_uart(const VirtMachineState *vms, int uart,
                        MemoryRegion *mem, Chardev *chr)
{
    char *nodename;
    hwaddr base = vms->memmap[uart].base;
    hwaddr size = vms->memmap[uart].size;
    int irq = vms->irqmap[uart];
    const char compat[] = "arm,pl011\0arm,primecell";
    const char clocknames[] = "uartclk\0apb_pclk";
    DeviceState *dev = qdev_new(TYPE_PL011);
    SysBusDevice *s = SYS_BUS_DEVICE(dev);
    MachineState *ms = MACHINE(vms);

    qdev_prop_set_chr(dev, "chardev", chr);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    memory_region_add_subregion(mem, base,
                                sysbus_mmio_get_region(s, 0));
    sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq));

    nodename = g_strdup_printf("/pl011@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    /* Note that we can't use setprop_string because of the embedded NUL */
    qemu_fdt_setprop(ms->fdt, nodename, "compatible",
                     compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base, 2, size);
    qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                           GIC_FDT_IRQ_TYPE_SPI, irq,
                           GIC_FDT_IRQ_FLAGS_LEVEL_HI);
    qemu_fdt_setprop_cells(ms->fdt, nodename, "clocks",
                           vms->clock_phandle, vms->clock_phandle);
    qemu_fdt_setprop(ms->fdt, nodename, "clock-names",
                     clocknames, sizeof(clocknames));

    if (uart == VIRT_UART) {
        qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", nodename);
    } else {
        /* Mark as not usable by the normal world */
        qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled");
        qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");

        qemu_fdt_setprop_string(ms->fdt, "/secure-chosen", "stdout-path",
                                nodename);
    }

    g_free(nodename);
}
static void create_rtc(const VirtMachineState *vms)
{
    char *nodename;
    hwaddr base = vms->memmap[VIRT_RTC].base;
    hwaddr size = vms->memmap[VIRT_RTC].size;
    int irq = vms->irqmap[VIRT_RTC];
    const char compat[] = "arm,pl031\0arm,primecell";
    MachineState *ms = MACHINE(vms);

    sysbus_create_simple("pl031", base, qdev_get_gpio_in(vms->gic, irq));

    nodename = g_strdup_printf("/pl031@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop(ms->fdt, nodename, "compatible", compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base, 2, size);
    qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                           GIC_FDT_IRQ_TYPE_SPI, irq,
                           GIC_FDT_IRQ_FLAGS_LEVEL_HI);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "clocks", vms->clock_phandle);
    qemu_fdt_setprop_string(ms->fdt, nodename, "clock-names", "apb_pclk");

    g_free(nodename);
}
static DeviceState *gpio_key_dev;
static void virt_powerdown_req(Notifier *n, void *opaque)
{
    VirtMachineState *s = container_of(n, VirtMachineState, powerdown_notifier);

    if (s->acpi_dev) {
        acpi_send_event(s->acpi_dev, ACPI_POWER_DOWN_STATUS);
    } else {
        /* use gpio Pin 3 for power button event */
        qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1);
    }
}
static void create_gpio_keys(char *fdt, DeviceState *pl061_dev,
                             uint32_t phandle)
{
    gpio_key_dev = sysbus_create_simple("gpio-key", -1,
                                        qdev_get_gpio_in(pl061_dev, 3));

    qemu_fdt_add_subnode(fdt, "/gpio-keys");
    qemu_fdt_setprop_string(fdt, "/gpio-keys", "compatible", "gpio-keys");

    qemu_fdt_add_subnode(fdt, "/gpio-keys/poweroff");
    qemu_fdt_setprop_string(fdt, "/gpio-keys/poweroff",
                            "label", "GPIO Key Poweroff");
    qemu_fdt_setprop_cell(fdt, "/gpio-keys/poweroff", "linux,code",
                          KEY_POWER);
    qemu_fdt_setprop_cells(fdt, "/gpio-keys/poweroff",
                           "gpios", phandle, 3, 0);
}
#define SECURE_GPIO_POWEROFF 0
#define SECURE_GPIO_RESET    1

static void create_secure_gpio_pwr(char *fdt, DeviceState *pl061_dev,
                                   uint32_t phandle)
{
    DeviceState *gpio_pwr_dev;

    gpio_pwr_dev = sysbus_create_simple("gpio-pwr", -1, NULL);

    /* connect secure pl061 to gpio-pwr */
    qdev_connect_gpio_out(pl061_dev, SECURE_GPIO_RESET,
                          qdev_get_gpio_in_named(gpio_pwr_dev, "reset", 0));
    qdev_connect_gpio_out(pl061_dev, SECURE_GPIO_POWEROFF,
                          qdev_get_gpio_in_named(gpio_pwr_dev, "shutdown", 0));

    qemu_fdt_add_subnode(fdt, "/gpio-poweroff");
    qemu_fdt_setprop_string(fdt, "/gpio-poweroff", "compatible",
                            "gpio-poweroff");
    qemu_fdt_setprop_cells(fdt, "/gpio-poweroff",
                           "gpios", phandle, SECURE_GPIO_POWEROFF, 0);
    qemu_fdt_setprop_string(fdt, "/gpio-poweroff", "status", "disabled");
    qemu_fdt_setprop_string(fdt, "/gpio-poweroff", "secure-status",
                            "okay");

    qemu_fdt_add_subnode(fdt, "/gpio-restart");
    qemu_fdt_setprop_string(fdt, "/gpio-restart", "compatible",
                            "gpio-restart");
    qemu_fdt_setprop_cells(fdt, "/gpio-restart",
                           "gpios", phandle, SECURE_GPIO_RESET, 0);
    qemu_fdt_setprop_string(fdt, "/gpio-restart", "status", "disabled");
    qemu_fdt_setprop_string(fdt, "/gpio-restart", "secure-status",
                            "okay");
}
*vms
, int gpio
,
993 DeviceState
*pl061_dev
;
994 hwaddr base
= vms
->memmap
[gpio
].base
;
995 hwaddr size
= vms
->memmap
[gpio
].size
;
996 int irq
= vms
->irqmap
[gpio
];
997 const char compat
[] = "arm,pl061\0arm,primecell";
999 MachineState
*ms
= MACHINE(vms
);
1001 pl061_dev
= qdev_new("pl061");
1002 /* Pull lines down to 0 if not driven by the PL061 */
1003 qdev_prop_set_uint32(pl061_dev
, "pullups", 0);
1004 qdev_prop_set_uint32(pl061_dev
, "pulldowns", 0xff);
1005 s
= SYS_BUS_DEVICE(pl061_dev
);
1006 sysbus_realize_and_unref(s
, &error_fatal
);
1007 memory_region_add_subregion(mem
, base
, sysbus_mmio_get_region(s
, 0));
1008 sysbus_connect_irq(s
, 0, qdev_get_gpio_in(vms
->gic
, irq
));
1010 uint32_t phandle
= qemu_fdt_alloc_phandle(ms
->fdt
);
1011 nodename
= g_strdup_printf("/pl061@%" PRIx64
, base
);
1012 qemu_fdt_add_subnode(ms
->fdt
, nodename
);
1013 qemu_fdt_setprop_sized_cells(ms
->fdt
, nodename
, "reg",
1015 qemu_fdt_setprop(ms
->fdt
, nodename
, "compatible", compat
, sizeof(compat
));
1016 qemu_fdt_setprop_cell(ms
->fdt
, nodename
, "#gpio-cells", 2);
1017 qemu_fdt_setprop(ms
->fdt
, nodename
, "gpio-controller", NULL
, 0);
1018 qemu_fdt_setprop_cells(ms
->fdt
, nodename
, "interrupts",
1019 GIC_FDT_IRQ_TYPE_SPI
, irq
,
1020 GIC_FDT_IRQ_FLAGS_LEVEL_HI
);
1021 qemu_fdt_setprop_cell(ms
->fdt
, nodename
, "clocks", vms
->clock_phandle
);
1022 qemu_fdt_setprop_string(ms
->fdt
, nodename
, "clock-names", "apb_pclk");
1023 qemu_fdt_setprop_cell(ms
->fdt
, nodename
, "phandle", phandle
);
1025 if (gpio
!= VIRT_GPIO
) {
1026 /* Mark as not usable by the normal world */
1027 qemu_fdt_setprop_string(ms
->fdt
, nodename
, "status", "disabled");
1028 qemu_fdt_setprop_string(ms
->fdt
, nodename
, "secure-status", "okay");
1032 /* Child gpio devices */
1033 if (gpio
== VIRT_GPIO
) {
1034 create_gpio_keys(ms
->fdt
, pl061_dev
, phandle
);
1036 create_secure_gpio_pwr(ms
->fdt
, pl061_dev
, phandle
);
static void create_virtio_devices(const VirtMachineState *vms)
{
    int i;
    hwaddr size = vms->memmap[VIRT_MMIO].size;
    MachineState *ms = MACHINE(vms);

    /* We create the transports in forwards order. Since qbus_realize()
     * prepends (not appends) new child buses, the incrementing loop below will
     * create a list of virtio-mmio buses with decreasing base addresses.
     *
     * When a -device option is processed from the command line,
     * qbus_find_recursive() picks the next free virtio-mmio bus in forwards
     * order. The upshot is that -device options in increasing command line
     * order are mapped to virtio-mmio buses with decreasing base addresses.
     *
     * When this code was originally written, that arrangement ensured that the
     * guest Linux kernel would give the lowest "name" (/dev/vda, eth0, etc) to
     * the first -device on the command line. (The end-to-end order is a
     * function of this loop, qbus_realize(), qbus_find_recursive(), and the
     * guest kernel's name-to-address assignment strategy.)
     *
     * Meanwhile, the kernel's traversal seems to have been reversed; see eg.
     * the message, if not necessarily the code, of commit 70161ff336.
     * Therefore the loop now establishes the inverse of the original intent.
     *
     * Unfortunately, we can't counteract the kernel change by reversing the
     * loop; it would break existing command lines.
     *
     * In any case, the kernel makes no guarantee about the stability of
     * enumeration order of virtio devices (as demonstrated by it changing
     * between kernel versions). For reliable and stable identification
     * of disks users must use UUIDs or similar mechanisms.
     */
    for (i = 0; i < NUM_VIRTIO_TRANSPORTS; i++) {
        int irq = vms->irqmap[VIRT_MMIO] + i;
        hwaddr base = vms->memmap[VIRT_MMIO].base + i * size;

        sysbus_create_simple("virtio-mmio", base,
                             qdev_get_gpio_in(vms->gic, irq));
    }

    /* We add dtb nodes in reverse order so that they appear in the finished
     * device tree lowest address first.
     *
     * Note that this mapping is independent of the loop above. The previous
     * loop influences virtio device to virtio transport assignment, whereas
     * this loop controls how virtio transports are laid out in the dtb.
     */
    for (i = NUM_VIRTIO_TRANSPORTS - 1; i >= 0; i--) {
        char *nodename;
        int irq = vms->irqmap[VIRT_MMIO] + i;
        hwaddr base = vms->memmap[VIRT_MMIO].base + i * size;

        nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, base);
        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename,
                                "compatible", "virtio,mmio");
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                     2, base, 2, size);
        qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                               GIC_FDT_IRQ_TYPE_SPI, irq,
                               GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
        qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
        g_free(nodename);
    }
}
#define VIRT_FLASH_SECTOR_SIZE (256 * KiB)

static PFlashCFI01 *virt_flash_create1(VirtMachineState *vms,
                                       const char *name,
                                       const char *alias_prop_name)
{
    /*
     * Create a single flash device. We use the same parameters as
     * the flash devices on the Versatile Express board.
     */
    DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01);

    qdev_prop_set_uint64(dev, "sector-length", VIRT_FLASH_SECTOR_SIZE);
    qdev_prop_set_uint8(dev, "width", 4);
    qdev_prop_set_uint8(dev, "device-width", 2);
    qdev_prop_set_bit(dev, "big-endian", false);
    qdev_prop_set_uint16(dev, "id0", 0x89);
    qdev_prop_set_uint16(dev, "id1", 0x18);
    qdev_prop_set_uint16(dev, "id2", 0x00);
    qdev_prop_set_uint16(dev, "id3", 0x00);
    qdev_prop_set_string(dev, "name", name);
    object_property_add_child(OBJECT(vms), name, OBJECT(dev));
    object_property_add_alias(OBJECT(vms), alias_prop_name,
                              OBJECT(dev), "drive");
    return PFLASH_CFI01(dev);
}

static void virt_flash_create(VirtMachineState *vms)
{
    vms->flash[0] = virt_flash_create1(vms, "virt.flash0", "pflash0");
    vms->flash[1] = virt_flash_create1(vms, "virt.flash1", "pflash1");
}

static void virt_flash_map1(PFlashCFI01 *flash,
                            hwaddr base, hwaddr size,
                            MemoryRegion *sysmem)
{
    DeviceState *dev = DEVICE(flash);

    assert(QEMU_IS_ALIGNED(size, VIRT_FLASH_SECTOR_SIZE));
    assert(size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX);
    qdev_prop_set_uint32(dev, "num-blocks", size / VIRT_FLASH_SECTOR_SIZE);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    memory_region_add_subregion(sysmem, base,
                                sysbus_mmio_get_region(SYS_BUS_DEVICE(dev),
                                                       0));
}

static void virt_flash_map(VirtMachineState *vms,
                           MemoryRegion *sysmem,
                           MemoryRegion *secure_sysmem)
{
    /*
     * Map two flash devices to fill the VIRT_FLASH space in the memmap.
     * sysmem is the system memory space. secure_sysmem is the secure view
     * of the system, and the first flash device should be made visible only
     * there. The second flash device is visible to both secure and nonsecure.
     * If sysmem == secure_sysmem this means there is no separate Secure
     * address space and both flash devices are generally visible.
     */
    hwaddr flashsize = vms->memmap[VIRT_FLASH].size / 2;
    hwaddr flashbase = vms->memmap[VIRT_FLASH].base;

    virt_flash_map1(vms->flash[0], flashbase, flashsize,
                    secure_sysmem);
    virt_flash_map1(vms->flash[1], flashbase + flashsize, flashsize,
                    sysmem);
}

static void virt_flash_fdt(VirtMachineState *vms,
                           MemoryRegion *sysmem,
                           MemoryRegion *secure_sysmem)
{
    hwaddr flashsize = vms->memmap[VIRT_FLASH].size / 2;
    hwaddr flashbase = vms->memmap[VIRT_FLASH].base;
    MachineState *ms = MACHINE(vms);
    char *nodename;

    if (sysmem == secure_sysmem) {
        /* Report both flash devices as a single node in the DT */
        nodename = g_strdup_printf("/flash@%" PRIx64, flashbase);
        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                     2, flashbase, 2, flashsize,
                                     2, flashbase + flashsize, 2, flashsize);
        qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4);
        g_free(nodename);
    } else {
        /*
         * Report the devices as separate nodes so we can mark one as
         * only visible to the secure world.
         */
        nodename = g_strdup_printf("/secflash@%" PRIx64, flashbase);
        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                     2, flashbase, 2, flashsize);
        qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4);
        qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled");
        qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");
        g_free(nodename);

        nodename = g_strdup_printf("/flash@%" PRIx64, flashbase + flashsize);
        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                     2, flashbase + flashsize, 2, flashsize);
        qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4);
        g_free(nodename);
    }
}
static bool virt_firmware_init(VirtMachineState *vms,
                               MemoryRegion *sysmem,
                               MemoryRegion *secure_sysmem)
{
    int i;
    const char *bios_name;
    BlockBackend *pflash_blk0;

    /* Map legacy -drive if=pflash to machine properties */
    for (i = 0; i < ARRAY_SIZE(vms->flash); i++) {
        pflash_cfi01_legacy_drive(vms->flash[i],
                                  drive_get(IF_PFLASH, 0, i));
    }

    virt_flash_map(vms, sysmem, secure_sysmem);

    pflash_blk0 = pflash_cfi01_get_blk(vms->flash[0]);

    bios_name = MACHINE(vms)->firmware;
    if (bios_name) {
        char *fname;
        MemoryRegion *mr;
        int image_size;

        if (pflash_blk0) {
            error_report("The contents of the first flash device may be "
                         "specified with -bios or with -drive if=pflash... "
                         "but you cannot use both options at once");
            exit(1);
        }

        /* Fall back to -bios */
        fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
        if (!fname) {
            error_report("Could not find ROM image '%s'", bios_name);
            exit(1);
        }
        mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(vms->flash[0]), 0);
        image_size = load_image_mr(fname, mr);
        g_free(fname);
        if (image_size < 0) {
            error_report("Could not load ROM image '%s'", bios_name);
            exit(1);
        }
    }

    return pflash_blk0 || bios_name;
}
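/*
 * Create the fw_cfg MMIO device and its device tree node; firmware and the
 * guest use it to retrieve configuration data such as the CPU count.
 */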
static FWCfgState *create_fw_cfg(const VirtMachineState *vms, AddressSpace *as)
{
    MachineState *ms = MACHINE(vms);
    hwaddr base = vms->memmap[VIRT_FW_CFG].base;
    hwaddr size = vms->memmap[VIRT_FW_CFG].size;
    FWCfgState *fw_cfg;
    char *nodename;

    fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16, as);
    fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)ms->smp.cpus);

    nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename,
                            "compatible", "qemu,fw-cfg-mmio");
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base, 2, size);
    qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
    g_free(nodename);

    return fw_cfg;
}
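/*
 * Generate the standard interrupt-map/interrupt-map-mask properties that
 * swizzle the four legacy PCI INTx pins across slots onto consecutive SPIs.
 */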
static void create_pcie_irq_map(const MachineState *ms,
                                uint32_t gic_phandle,
                                int first_irq, const char *nodename)
{
    int devfn, pin;
    uint32_t full_irq_map[4 * 4 * 10] = { 0 };
    uint32_t *irq_map = full_irq_map;

    for (devfn = 0; devfn <= 0x18; devfn += 0x8) {
        for (pin = 0; pin < 4; pin++) {
            int irq_type = GIC_FDT_IRQ_TYPE_SPI;
            int irq_nr = first_irq + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS);
            int irq_level = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
            int i;

            uint32_t map[] = {
                devfn << 8, 0, 0,                           /* devfn */
                pin + 1,                                    /* PCI pin */
                gic_phandle, 0, 0, irq_type, irq_nr, irq_level }; /* GIC irq */

            /* Convert map to big endian */
            for (i = 0; i < 10; i++) {
                irq_map[i] = cpu_to_be32(map[i]);
            }
            irq_map += 10;
        }
    }

    qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map",
                     full_irq_map, sizeof(full_irq_map));

    qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask",
                           cpu_to_be16(PCI_DEVFN(3, 0)), /* Slot 3 */
                           0, 0,
                           0x7           /* PCI irq */);
}
static void create_smmu(const VirtMachineState *vms,
                        PCIBus *bus)
{
    char *node;
    const char compat[] = "arm,smmu-v3";
    int irq = vms->irqmap[VIRT_SMMU];
    int i;
    hwaddr base = vms->memmap[VIRT_SMMU].base;
    hwaddr size = vms->memmap[VIRT_SMMU].size;
    const char irq_names[] = "eventq\0priq\0cmdq-sync\0gerror";
    DeviceState *dev;
    MachineState *ms = MACHINE(vms);

    if (vms->iommu != VIRT_IOMMU_SMMUV3 || !vms->iommu_phandle) {
        return;
    }

    dev = qdev_new(TYPE_ARM_SMMUV3);

    object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus),
                             &error_abort);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    for (i = 0; i < NUM_SMMU_IRQS; i++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
                           qdev_get_gpio_in(vms->gic, irq + i));
    }

    node = g_strdup_printf("/smmuv3@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, node);
    qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg", 2, base, 2, size);

    qemu_fdt_setprop_cells(ms->fdt, node, "interrupts",
            GIC_FDT_IRQ_TYPE_SPI, irq    , GIC_FDT_IRQ_FLAGS_EDGE_LO_HI,
            GIC_FDT_IRQ_TYPE_SPI, irq + 1, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI,
            GIC_FDT_IRQ_TYPE_SPI, irq + 2, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI,
            GIC_FDT_IRQ_TYPE_SPI, irq + 3, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);

    qemu_fdt_setprop(ms->fdt, node, "interrupt-names", irq_names,
                     sizeof(irq_names));

    qemu_fdt_setprop(ms->fdt, node, "dma-coherent", NULL, 0);

    qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1);

    qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle);
    g_free(node);
}
static void create_virtio_iommu_dt_bindings(VirtMachineState *vms)
{
    const char compat[] = "virtio,pci-iommu\0pci1af4,1057";
    uint16_t bdf = vms->virtio_iommu_bdf;
    MachineState *ms = MACHINE(vms);
    char *node;

    vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt);

    node = g_strdup_printf("%s/virtio_iommu@%x,%x", vms->pciehb_nodename,
                           PCI_SLOT(bdf), PCI_FUNC(bdf));
    qemu_fdt_add_subnode(ms->fdt, node);
    qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg",
                                 1, bdf << 8, 1, 0, 1, 0,
                                 1, 0, 1, 0);

    qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1);
    qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle);
    g_free(node);

    qemu_fdt_setprop_cells(ms->fdt, vms->pciehb_nodename, "iommu-map",
                           0x0, vms->iommu_phandle, 0x0, bdf,
                           bdf + 1, vms->iommu_phandle, bdf + 1, 0xffff - bdf);
}
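/*
 * Create the generic PCIe host bridge (GPEX): the ECAM window, low and high
 * MMIO windows, PIO window, legacy INTx wiring, optional default NICs, the
 * host bridge device tree node, and the SMMUv3 or virtio-iommu binding when
 * an IOMMU has been selected.
 */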
static void create_pcie(VirtMachineState *vms)
{
    hwaddr base_mmio = vms->memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = vms->memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_mmio_high = vms->memmap[VIRT_HIGH_PCIE_MMIO].base;
    hwaddr size_mmio_high = vms->memmap[VIRT_HIGH_PCIE_MMIO].size;
    hwaddr base_pio = vms->memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = vms->memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam, size_ecam;
    hwaddr base = base_mmio;
    int nr_pcie_buses;
    int irq = vms->irqmap[VIRT_PCIE];
    MemoryRegion *mmio_alias;
    MemoryRegion *mmio_reg;
    MemoryRegion *ecam_alias;
    MemoryRegion *ecam_reg;
    DeviceState *dev;
    char *nodename;
    int i, ecam_id;
    PCIHostState *pci;
    MachineState *ms = MACHINE(vms);

    dev = qdev_new(TYPE_GPEX_HOST);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
    base_ecam = vms->memmap[ecam_id].base;
    size_ecam = vms->memmap[ecam_id].size;
    nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
    /* Map only the first size_ecam bytes of ECAM space */
    ecam_alias = g_new0(MemoryRegion, 1);
    ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
    memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam",
                             ecam_reg, 0, size_ecam);
    memory_region_add_subregion(get_system_memory(), base_ecam, ecam_alias);

    /* Map the MMIO window into system address space so as to expose
     * the section of PCI MMIO space which starts at the same base address
     * (ie 1:1 mapping for that part of PCI MMIO space visible through
     * the window).
     */
    mmio_alias = g_new0(MemoryRegion, 1);
    mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
    memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
                             mmio_reg, base_mmio, size_mmio);
    memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);

    if (vms->highmem_mmio) {
        /* Map high MMIO space */
        MemoryRegion *high_mmio_alias = g_new0(MemoryRegion, 1);

        memory_region_init_alias(high_mmio_alias, OBJECT(dev), "pcie-mmio-high",
                                 mmio_reg, base_mmio_high, size_mmio_high);
        memory_region_add_subregion(get_system_memory(), base_mmio_high,
                                    high_mmio_alias);
    }

    /* Map IO port space */
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio);

    for (i = 0; i < GPEX_NUM_IRQS; i++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
                           qdev_get_gpio_in(vms->gic, irq + i));
        gpex_set_irq_num(GPEX_HOST(dev), i, irq + i);
    }

    pci = PCI_HOST_BRIDGE(dev);
    pci->bypass_iommu = vms->default_bus_bypass_iommu;
    vms->bus = pci->bus;
    if (vms->bus) {
        for (i = 0; i < nb_nics; i++) {
            NICInfo *nd = &nd_table[i];

            if (!nd->model) {
                nd->model = g_strdup("virtio");
            }

            pci_nic_init_nofail(nd, pci->bus, nd->model, NULL);
        }
    }

    nodename = vms->pciehb_nodename = g_strdup_printf("/pcie@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename,
                            "compatible", "pci-host-ecam-generic");
    qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci");
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 3);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "linux,pci-domain", 0);
    qemu_fdt_setprop_cells(ms->fdt, nodename, "bus-range", 0,
                           nr_pcie_buses - 1);
    qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);

    if (vms->msi_phandle) {
        qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-map",
                               0, vms->msi_phandle, 0, 0x10000);
    }

    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base_ecam, 2, size_ecam);

    if (vms->highmem_mmio) {
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges",
                                     1, FDT_PCI_RANGE_IOPORT, 2, 0,
                                     2, base_pio, 2, size_pio,
                                     1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
                                     2, base_mmio, 2, size_mmio,
                                     1, FDT_PCI_RANGE_MMIO_64BIT,
                                     2, base_mmio_high,
                                     2, base_mmio_high, 2, size_mmio_high);
    } else {
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges",
                                     1, FDT_PCI_RANGE_IOPORT, 2, 0,
                                     2, base_pio, 2, size_pio,
                                     1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
                                     2, base_mmio, 2, size_mmio);
    }

    qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1);
    create_pcie_irq_map(ms, vms->gic_phandle, irq, nodename);

    if (vms->iommu) {
        vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt);

        switch (vms->iommu) {
        case VIRT_IOMMU_SMMUV3:
            create_smmu(vms, vms->bus);
            qemu_fdt_setprop_cells(ms->fdt, nodename, "iommu-map",
                                   0x0, vms->iommu_phandle, 0x0, 0x10000);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
*vms
)
1547 MemoryRegion
*sysmem
= get_system_memory();
1549 dev
= qdev_new(TYPE_PLATFORM_BUS_DEVICE
);
1550 dev
->id
= g_strdup(TYPE_PLATFORM_BUS_DEVICE
);
1551 qdev_prop_set_uint32(dev
, "num_irqs", PLATFORM_BUS_NUM_IRQS
);
1552 qdev_prop_set_uint32(dev
, "mmio_size", vms
->memmap
[VIRT_PLATFORM_BUS
].size
);
1553 sysbus_realize_and_unref(SYS_BUS_DEVICE(dev
), &error_fatal
);
1554 vms
->platform_bus_dev
= dev
;
1556 s
= SYS_BUS_DEVICE(dev
);
1557 for (i
= 0; i
< PLATFORM_BUS_NUM_IRQS
; i
++) {
1558 int irq
= vms
->irqmap
[VIRT_PLATFORM_BUS
] + i
;
1559 sysbus_connect_irq(s
, i
, qdev_get_gpio_in(vms
->gic
, irq
));
1562 memory_region_add_subregion(sysmem
,
1563 vms
->memmap
[VIRT_PLATFORM_BUS
].base
,
1564 sysbus_mmio_get_region(s
, 0));
static void create_tag_ram(MemoryRegion *tag_sysmem,
                           hwaddr base, hwaddr size,
                           const char *name)
{
    MemoryRegion *tagram = g_new(MemoryRegion, 1);

    memory_region_init_ram(tagram, NULL, name, size / 32, &error_fatal);
    memory_region_add_subregion(tag_sysmem, base / 32, tagram);
}
static void create_secure_ram(VirtMachineState *vms,
                              MemoryRegion *secure_sysmem,
                              MemoryRegion *secure_tag_sysmem)
{
    MemoryRegion *secram = g_new(MemoryRegion, 1);
    char *nodename;
    hwaddr base = vms->memmap[VIRT_SECURE_MEM].base;
    hwaddr size = vms->memmap[VIRT_SECURE_MEM].size;
    MachineState *ms = MACHINE(vms);

    memory_region_init_ram(secram, NULL, "virt.secure-ram", size,
                           &error_fatal);
    memory_region_add_subregion(secure_sysmem, base, secram);

    nodename = g_strdup_printf("/secram@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "memory");
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size);
    qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled");
    qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");

    if (secure_tag_sysmem) {
        create_tag_ram(secure_tag_sysmem, base, size, "mach-virt.secure-tag");
    }

    g_free(nodename);
}
static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
{
    const VirtMachineState *board = container_of(binfo, VirtMachineState,
                                                 bootinfo);
    MachineState *ms = MACHINE(board);

    *fdt_size = board->fdt_size;
    return ms->fdt;
}
*vms
)
1618 MachineClass
*mc
= MACHINE_GET_CLASS(vms
);
1619 MachineState
*ms
= MACHINE(vms
);
1620 VirtMachineClass
*vmc
= VIRT_MACHINE_GET_CLASS(vms
);
1621 uint8_t *smbios_tables
, *smbios_anchor
;
1622 size_t smbios_tables_len
, smbios_anchor_len
;
1623 struct smbios_phys_mem_area mem_array
;
1624 const char *product
= "QEMU Virtual Machine";
1626 if (kvm_enabled()) {
1627 product
= "KVM Virtual Machine";
1630 smbios_set_defaults("QEMU", product
,
1631 vmc
->smbios_old_sys_ver
? "1.0" : mc
->name
, false,
1632 true, SMBIOS_ENTRY_POINT_TYPE_64
);
1634 /* build the array of physical mem area from base_memmap */
1635 mem_array
.address
= vms
->memmap
[VIRT_MEM
].base
;
1636 mem_array
.length
= ms
->ram_size
;
1638 smbios_get_tables(ms
, &mem_array
, 1,
1639 &smbios_tables
, &smbios_tables_len
,
1640 &smbios_anchor
, &smbios_anchor_len
,
1643 if (smbios_anchor
) {
1644 fw_cfg_add_file(vms
->fw_cfg
, "etc/smbios/smbios-tables",
1645 smbios_tables
, smbios_tables_len
);
1646 fw_cfg_add_file(vms
->fw_cfg
, "etc/smbios/smbios-anchor",
1647 smbios_anchor
, smbios_anchor_len
);
1652 void virt_machine_done(Notifier
*notifier
, void *data
)
1654 VirtMachineState
*vms
= container_of(notifier
, VirtMachineState
,
1656 MachineState
*ms
= MACHINE(vms
);
1657 ARMCPU
*cpu
= ARM_CPU(first_cpu
);
1658 struct arm_boot_info
*info
= &vms
->bootinfo
;
1659 AddressSpace
*as
= arm_boot_address_space(cpu
, info
);
1662 * If the user provided a dtb, we assume the dynamic sysbus nodes
1663 * already are integrated there. This corresponds to a use case where
1664 * the dynamic sysbus nodes are complex and their generation is not yet
1665 * supported. In that case the user can take charge of the guest dt
1666 * while qemu takes charge of the qom stuff.
1668 if (info
->dtb_filename
== NULL
) {
1669 platform_bus_add_all_fdt_nodes(ms
->fdt
, "/intc",
1670 vms
->memmap
[VIRT_PLATFORM_BUS
].base
,
1671 vms
->memmap
[VIRT_PLATFORM_BUS
].size
,
1672 vms
->irqmap
[VIRT_PLATFORM_BUS
]);
1674 if (arm_load_dtb(info
->dtb_start
, info
, info
->dtb_limit
, as
, ms
) < 0) {
1678 fw_cfg_add_extra_pci_roots(vms
->bus
, vms
->fw_cfg
);
1680 virt_acpi_setup(vms
);
1681 virt_build_smbios(vms
);
static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx)
{
    uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);

    if (!vmc->disallow_affinity_adjustment) {
        /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the
         * GIC's target-list limitations. 32-bit KVM hosts currently
         * always create clusters of 4 CPUs, but that is expected to
         * change when they gain support for gicv3. When KVM is enabled
         * it will override the changes we make here, therefore our
         * purposes are to make TCG consistent (with 64-bit KVM hosts)
         * and to improve SGI efficiency.
         */
        if (vms->gic_version == VIRT_GIC_VERSION_2) {
            clustersz = GIC_TARGETLIST_BITS;
        } else {
            clustersz = GICV3_TARGETLIST_BITS;
        }
    }
    return arm_cpu_mp_affinity(idx, clustersz);
}
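/*
 * Illustration: with a GICv3/v4 cluster size of 16, CPU index 17 maps to
 * Aff1 = 17 / 16 = 1 and Aff0 = 17 % 16 = 1, i.e. an MPIDR affinity value
 * of 0x101. (This assumes arm_cpu_mp_affinity() packs idx / clustersz into
 * Aff1 and idx % clustersz into Aff0, which is what the cluster-size
 * adjustment above relies on.)
 */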
static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms,
                                                 int index)
{
    bool *enabled_array[] = {
        &vms->highmem_redists,
        &vms->highmem_ecam,
        &vms->highmem_mmio,
    };

    assert(ARRAY_SIZE(extended_memmap) - VIRT_LOWMEMMAP_LAST ==
           ARRAY_SIZE(enabled_array));
    assert(index - VIRT_LOWMEMMAP_LAST < ARRAY_SIZE(enabled_array));

    return enabled_array[index - VIRT_LOWMEMMAP_LAST];
}
static void virt_set_high_memmap(VirtMachineState *vms,
                                 hwaddr base, int pa_bits)
{
    hwaddr region_base, region_size;
    bool *region_enabled, fits;
    int i;

    for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) {
        region_enabled = virt_get_high_memmap_enabled(vms, i);
        region_base = ROUND_UP(base, extended_memmap[i].size);
        region_size = extended_memmap[i].size;

        vms->memmap[i].base = region_base;
        vms->memmap[i].size = region_size;

        /*
         * Check each device to see if it fits in the PA space,
         * moving highest_gpa as we go. For compatibility, move
         * highest_gpa for disabled fitting devices as well, if
         * the compact layout has been disabled.
         *
         * For each device that doesn't fit, disable it.
         */
        fits = (region_base + region_size) <= BIT_ULL(pa_bits);
        *region_enabled &= fits;
        if (vms->highmem_compact && !*region_enabled) {
            continue;
        }

        base = region_base + region_size;
        if (fits) {
            vms->highest_gpa = base - 1;
        }
    }
}
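/*
 * Illustration of the placement rule above: each high region is aligned up
 * to its own size, so e.g. a 256 MiB region starting from base 0x80001000
 * would be placed at the next 256 MiB boundary, 0x90000000. A region stays
 * enabled only if its end still fits below 1 << pa_bits; with
 * highmem_compact enabled, disabled regions no longer consume address space.
 */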
static void virt_set_memmap(VirtMachineState *vms, int pa_bits)
{
    MachineState *ms = MACHINE(vms);
    hwaddr base, device_memory_base, device_memory_size, memtop;
    int i;

    vms->memmap = extended_memmap;

    for (i = 0; i < ARRAY_SIZE(base_memmap); i++) {
        vms->memmap[i] = base_memmap[i];
    }

    if (ms->ram_slots > ACPI_MAX_RAM_SLOTS) {
        error_report("unsupported number of memory slots: %"PRIu64,
                     ms->ram_slots);
        exit(EXIT_FAILURE);
    }

    /*
     * !highmem is exactly the same as limiting the PA space to 32bit,
     * irrespective of the underlying capabilities of the HW.
     */
    if (!vms->highmem) {
        pa_bits = 32;
    }

    /*
     * We compute the base of the high IO region depending on the
     * amount of initial and device memory. The device memory start/size
     * is aligned on 1GiB. We never put the high IO region below 256GiB
     * so that if maxram_size is < 255GiB we keep the legacy memory map.
     * The device region size assumes 1GiB page max alignment per slot.
     */
    device_memory_base =
        ROUND_UP(vms->memmap[VIRT_MEM].base + ms->ram_size, GiB);
    device_memory_size = ms->maxram_size - ms->ram_size + ms->ram_slots * GiB;

    /* Base address of the high IO region */
    memtop = base = device_memory_base + ROUND_UP(device_memory_size, GiB);
    if (memtop > BIT_ULL(pa_bits)) {
        error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes\n",
                     pa_bits, memtop - BIT_ULL(pa_bits));
        exit(EXIT_FAILURE);
    }
    if (base < device_memory_base) {
        error_report("maxmem/slots too huge");
        exit(EXIT_FAILURE);
    }
    if (base < vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES) {
        base = vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES;
    }

    /* We know for sure that at least the memory fits in the PA space */
    vms->highest_gpa = memtop - 1;

    virt_set_high_memmap(vms, base, pa_bits);

    if (device_memory_size > 0) {
        ms->device_memory = g_malloc0(sizeof(*ms->device_memory));
        ms->device_memory->base = device_memory_base;
        memory_region_init(&ms->device_memory->mr, OBJECT(vms),
                           "device-memory", device_memory_size);
    }
}
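/*
 * Worked example (assuming the usual 1 GiB RAM base and 255 GiB legacy RAM
 * limit): with -m 4G,maxmem=8G,slots=2, device_memory_base becomes
 * ROUND_UP(1 GiB + 4 GiB, 1 GiB) = 5 GiB and device_memory_size is
 * 8 GiB - 4 GiB + 2 GiB = 6 GiB, so memtop is 11 GiB. That is below the
 * 256 GiB floor, so the high IO regions start at 256 GiB while highest_gpa
 * initially covers only memory (11 GiB - 1) until virt_set_high_memmap()
 * extends it.
 */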
static VirtGICType finalize_gic_version_do(const char *accel_name,
                                           VirtGICType gic_version,
                                           int gics_supported,
                                           unsigned int max_cpus)
{
    /* Convert host/max/nosel to GIC version number */
    switch (gic_version) {
    case VIRT_GIC_VERSION_HOST:
        if (!kvm_enabled()) {
            error_report("gic-version=host requires KVM");
            exit(1);
        }

        /* For KVM, gic-version=host means gic-version=max */
        return finalize_gic_version_do(accel_name, VIRT_GIC_VERSION_MAX,
                                       gics_supported, max_cpus);
    case VIRT_GIC_VERSION_MAX:
        if (gics_supported & VIRT_GIC_VERSION_4_MASK) {
            gic_version = VIRT_GIC_VERSION_4;
        } else if (gics_supported & VIRT_GIC_VERSION_3_MASK) {
            gic_version = VIRT_GIC_VERSION_3;
        } else {
            gic_version = VIRT_GIC_VERSION_2;
        }
        break;
    case VIRT_GIC_VERSION_NOSEL:
        if ((gics_supported & VIRT_GIC_VERSION_2_MASK) &&
            max_cpus <= GIC_NCPU) {
            gic_version = VIRT_GIC_VERSION_2;
        } else if (gics_supported & VIRT_GIC_VERSION_3_MASK) {
            /*
             * in case the host does not support v2 emulation or
             * the end-user requested more than 8 VCPUs we now default
             * to v3. In any case defaulting to v2 would be broken.
             */
            gic_version = VIRT_GIC_VERSION_3;
        } else if (max_cpus > GIC_NCPU) {
            error_report("%s only supports GICv2 emulation but more than 8 "
                         "vcpus are requested", accel_name);
            exit(1);
        }
        break;
    case VIRT_GIC_VERSION_2:
    case VIRT_GIC_VERSION_3:
    case VIRT_GIC_VERSION_4:
        break;
    }

    /* Check chosen version is effectively supported */
    switch (gic_version) {
    case VIRT_GIC_VERSION_2:
        if (!(gics_supported & VIRT_GIC_VERSION_2_MASK)) {
            error_report("%s does not support GICv2 emulation", accel_name);
            exit(1);
        }
        break;
    case VIRT_GIC_VERSION_3:
        if (!(gics_supported & VIRT_GIC_VERSION_3_MASK)) {
            error_report("%s does not support GICv3 emulation", accel_name);
            exit(1);
        }
        break;
    case VIRT_GIC_VERSION_4:
        if (!(gics_supported & VIRT_GIC_VERSION_4_MASK)) {
            error_report("%s does not support GICv4 emulation, is virtualization=on?",
                         accel_name);
            exit(1);
        }
        break;
    default:
        error_report("logic error in finalize_gic_version");
        exit(1);
        break;
    }

    return gic_version;
}
/*
 * finalize_gic_version - Determines the final gic_version
 * according to the gic-version property
 *
 * Default GIC type is v2
 */
static void finalize_gic_version(VirtMachineState *vms)
{
    const char *accel_name = current_accel_name();
    unsigned int max_cpus = MACHINE(vms)->smp.max_cpus;
    int gics_supported = 0;

    /* Determine which GIC versions the current environment supports */
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        int probe_bitmap = kvm_arm_vgic_probe();

        if (!probe_bitmap) {
            error_report("Unable to determine GIC version supported by host");
            exit(1);
        }

        if (probe_bitmap & KVM_ARM_VGIC_V2) {
            gics_supported |= VIRT_GIC_VERSION_2_MASK;
        }
        if (probe_bitmap & KVM_ARM_VGIC_V3) {
            gics_supported |= VIRT_GIC_VERSION_3_MASK;
        }
    } else if (kvm_enabled() && !kvm_irqchip_in_kernel()) {
        /* KVM w/o kernel irqchip can only deal with GICv2 */
        gics_supported |= VIRT_GIC_VERSION_2_MASK;
        accel_name = "KVM with kernel-irqchip=off";
    } else if (tcg_enabled() || hvf_enabled() || qtest_enabled()) {
        gics_supported |= VIRT_GIC_VERSION_2_MASK;
        if (module_object_class_by_name("arm-gicv3")) {
            gics_supported |= VIRT_GIC_VERSION_3_MASK;
            if (vms->virt) {
                /* GICv4 only makes sense if CPU has EL2 */
                gics_supported |= VIRT_GIC_VERSION_4_MASK;
            }
        }
    } else {
        error_report("Unsupported accelerator, can not determine GIC support");
        exit(1);
    }

    /*
     * Then convert helpers like host/max to concrete GIC versions and ensure
     * the desired version is supported
     */
    vms->gic_version = finalize_gic_version_do(accel_name, vms->gic_version,
                                               gics_supported, max_cpus);
}
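/*
 * Examples of how the property values resolve (illustrative):
 *   -M virt,gic-version=host   KVM only; behaves like gic-version=max
 *   -M virt,gic-version=max    v4 if supported, else v3, else v2
 *   -M virt (no selection)     v2 when supported and <= 8 vcpus, else v3
 * The chosen version is then validated against gics_supported above.
 */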
/*
 * virt_cpu_post_init() must be called after the CPUs have
 * been realized and the GIC has been created.
 */
static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem)
{
    int max_cpus = MACHINE(vms)->smp.max_cpus;
    bool aarch64, pmu, steal_time;
    CPUState *cpu;

    aarch64 = object_property_get_bool(OBJECT(first_cpu), "aarch64", NULL);
    pmu = object_property_get_bool(OBJECT(first_cpu), "pmu", NULL);
    steal_time = object_property_get_bool(OBJECT(first_cpu),
                                          "kvm-steal-time", NULL);

    if (kvm_enabled()) {
        hwaddr pvtime_reg_base = vms->memmap[VIRT_PVTIME].base;
        hwaddr pvtime_reg_size = vms->memmap[VIRT_PVTIME].size;

        if (steal_time) {
            MemoryRegion *pvtime = g_new(MemoryRegion, 1);
            hwaddr pvtime_size = max_cpus * PVTIME_SIZE_PER_CPU;

            /* The memory region size must be a multiple of host page size. */
            pvtime_size = REAL_HOST_PAGE_ALIGN(pvtime_size);

            if (pvtime_size > pvtime_reg_size) {
                error_report("pvtime requires a %" HWADDR_PRId
                             " byte memory region for %d CPUs,"
                             " but only %" HWADDR_PRId " has been reserved",
                             pvtime_size, max_cpus, pvtime_reg_size);
                exit(1);
            }

            memory_region_init_ram(pvtime, NULL, "pvtime", pvtime_size, NULL);
            memory_region_add_subregion(sysmem, pvtime_reg_base, pvtime);
        }

        CPU_FOREACH(cpu) {
            if (pmu) {
                assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU));
                if (kvm_irqchip_in_kernel()) {
                    kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ));
                }
                kvm_arm_pmu_init(cpu);
            }
            if (steal_time) {
                kvm_arm_pvtime_init(cpu, pvtime_reg_base +
                                    cpu->cpu_index * PVTIME_SIZE_PER_CPU);
            }
        }
    }

    if (aarch64 && vms->highmem) {
        int requested_pa_size = 64 - clz64(vms->highest_gpa);
        int pamax = arm_pamax(ARM_CPU(first_cpu));

        if (pamax < requested_pa_size) {
            error_report("VCPU supports less PA bits (%d) than "
                         "requested by the memory map (%d)",
                         pamax, requested_pa_size);
            exit(1);
        }
    }
}
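/*
 * Sizing note (illustrative): the steal-time region holds one
 * PVTIME_SIZE_PER_CPU record per possible CPU and is then rounded up to the
 * host page size, so e.g. a 4-vcpu guest on a 4 KiB page host still reserves
 * a full 4 KiB page out of the VIRT_PVTIME window.
 */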
static void machvirt_init(MachineState *machine)
{
    VirtMachineState *vms = VIRT_MACHINE(machine);
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(machine);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *possible_cpus;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *secure_sysmem = NULL;
    MemoryRegion *tag_sysmem = NULL;
    MemoryRegion *secure_tag_sysmem = NULL;
    int n, virt_max_cpus;
    bool firmware_loaded;
    bool aarch64 = true;
    bool has_ged = !vmc->no_ged;
    unsigned int smp_cpus = machine->smp.cpus;
    unsigned int max_cpus = machine->smp.max_cpus;

    if (!cpu_type_valid(machine->cpu_type)) {
        error_report("mach-virt: CPU type %s not supported", machine->cpu_type);
        exit(1);
    }

    possible_cpus = mc->possible_cpu_arch_ids(machine);

    /*
     * In accelerated mode, the memory map is computed earlier in kvm_type()
     * to create a VM with the right number of IPA bits.
     */
    if (!vms->memmap) {
        Object *cpuobj;
        ARMCPU *armcpu;
        int pa_bits;

        /*
         * Instantiate a temporary CPU object to find out about what
         * we are about to deal with. Once this is done, get rid of
         * the object.
         */
        cpuobj = object_new(possible_cpus->cpus[0].type);
        armcpu = ARM_CPU(cpuobj);

        pa_bits = arm_pamax(armcpu);

        object_unref(cpuobj);

        virt_set_memmap(vms, pa_bits);
    }

    /* We can probe only here because during property set
     * KVM is not available yet
     */
    finalize_gic_version(vms);

    if (vms->secure) {
        /*
         * The Secure view of the world is the same as the NonSecure,
         * but with a few extra devices. Create it as a container region
         * containing the system memory at low priority; any secure-only
         * devices go in at higher priority and take precedence.
         */
        secure_sysmem = g_new(MemoryRegion, 1);
        memory_region_init(secure_sysmem, OBJECT(machine), "secure-memory",
                           UINT64_MAX);
        memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1);
    }

    firmware_loaded = virt_firmware_init(vms, sysmem,
                                         secure_sysmem ?: sysmem);

    /* If we have an EL3 boot ROM then the assumption is that it will
     * implement PSCI itself, so disable QEMU's internal implementation
     * so it doesn't get in the way. Instead of starting secondary
     * CPUs in PSCI powerdown state we will start them all running and
     * let the boot ROM sort them out.
     * The usual case is that we do use QEMU's PSCI implementation;
     * if the guest has EL2 then we will use SMC as the conduit,
     * and otherwise we will use HVC (for backwards compatibility and
     * because if we're using KVM then we must use HVC).
     */
    if (vms->secure && firmware_loaded) {
        vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
    } else if (vms->virt) {
        vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC;
    } else {
        vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC;
    }

    /*
     * The maximum number of CPUs depends on the GIC version, or on how
     * many redistributors we can fit into the memory map (which in turn
     * depends on whether this is a GICv3 or v4).
     */
    if (vms->gic_version == VIRT_GIC_VERSION_2) {
        virt_max_cpus = GIC_NCPU;
    } else {
        virt_max_cpus = virt_redist_capacity(vms, VIRT_GIC_REDIST);
        if (vms->highmem_redists) {
            virt_max_cpus += virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2);
        }
    }

    if (max_cpus > virt_max_cpus) {
        error_report("Number of SMP CPUs requested (%d) exceeds max CPUs "
                     "supported by machine 'mach-virt' (%d)",
                     max_cpus, virt_max_cpus);
        if (vms->gic_version != VIRT_GIC_VERSION_2 && !vms->highmem_redists) {
            error_printf("Try 'highmem-redists=on' for more CPUs\n");
        }

        exit(1);
    }

    if (vms->secure && (kvm_enabled() || hvf_enabled())) {
        error_report("mach-virt: %s does not support providing "
                     "Security extensions (TrustZone) to the guest CPU",
                     current_accel_name());
        exit(1);
    }

    if (vms->virt && (kvm_enabled() || hvf_enabled())) {
        error_report("mach-virt: %s does not support providing "
                     "Virtualization extensions to the guest CPU",
                     current_accel_name());
        exit(1);
    }

    if (vms->mte && (kvm_enabled() || hvf_enabled())) {
        error_report("mach-virt: %s does not support providing "
                     "MTE to the guest CPU",
                     current_accel_name());
        exit(1);
    }

    create_fdt(vms);

    assert(possible_cpus->len == max_cpus);
    for (n = 0; n < possible_cpus->len; n++) {
        Object *cpuobj;
        CPUState *cs;

        if (n >= smp_cpus) {
            break;
        }

        cpuobj = object_new(possible_cpus->cpus[n].type);
        object_property_set_int(cpuobj, "mp-affinity",
                                possible_cpus->cpus[n].arch_id, NULL);

        cs = CPU(cpuobj);
        cs->cpu_index = n;

        numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj),
                          &error_fatal);

        aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL);

        if (!vms->secure) {
            object_property_set_bool(cpuobj, "has_el3", false, NULL);
        }

        if (!vms->virt && object_property_find(cpuobj, "has_el2")) {
            object_property_set_bool(cpuobj, "has_el2", false, NULL);
        }

        if (vmc->kvm_no_adjvtime &&
            object_property_find(cpuobj, "kvm-no-adjvtime")) {
            object_property_set_bool(cpuobj, "kvm-no-adjvtime", true, NULL);
        }

        if (vmc->no_kvm_steal_time &&
            object_property_find(cpuobj, "kvm-steal-time")) {
            object_property_set_bool(cpuobj, "kvm-steal-time", false, NULL);
        }

        if (vmc->no_pmu && object_property_find(cpuobj, "pmu")) {
            object_property_set_bool(cpuobj, "pmu", false, NULL);
        }

        if (vmc->no_tcg_lpa2 && object_property_find(cpuobj, "lpa2")) {
            object_property_set_bool(cpuobj, "lpa2", false, NULL);
        }

        if (object_property_find(cpuobj, "reset-cbar")) {
            object_property_set_int(cpuobj, "reset-cbar",
                                    vms->memmap[VIRT_CPUPERIPHS].base,
                                    &error_abort);
        }

        object_property_set_link(cpuobj, "memory", OBJECT(sysmem),
                                 &error_abort);
        if (vms->secure) {
            object_property_set_link(cpuobj, "secure-memory",
                                     OBJECT(secure_sysmem), &error_abort);
        }

        if (vms->mte) {
            /* Create the memory region only once, but link to all cpus. */
            if (!tag_sysmem) {
                /*
                 * The property exists only if MemTag is supported.
                 * If it is, we must allocate the ram to back that up.
                 */
                if (!object_property_find(cpuobj, "tag-memory")) {
                    error_report("MTE requested, but not supported "
                                 "by the guest CPU");
                    exit(1);
                }

                tag_sysmem = g_new(MemoryRegion, 1);
                memory_region_init(tag_sysmem, OBJECT(machine),
                                   "tag-memory", UINT64_MAX / 32);

                if (vms->secure) {
                    secure_tag_sysmem = g_new(MemoryRegion, 1);
                    memory_region_init(secure_tag_sysmem, OBJECT(machine),
                                       "secure-tag-memory", UINT64_MAX / 32);

                    /* As with ram, secure-tag takes precedence over tag. */
                    memory_region_add_subregion_overlap(secure_tag_sysmem, 0,
                                                        tag_sysmem, -1);
                }
            }

            object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem),
                                     &error_abort);
            if (vms->secure) {
                object_property_set_link(cpuobj, "secure-tag-memory",
                                         OBJECT(secure_tag_sysmem),
                                         &error_abort);
            }
        }

        qdev_realize(DEVICE(cpuobj), NULL, &error_fatal);
        object_unref(cpuobj);
    }
    fdt_add_timer_nodes(vms);
    fdt_add_cpu_nodes(vms);

    memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base,
                                machine->ram);
    if (machine->device_memory) {
        memory_region_add_subregion(sysmem, machine->device_memory->base,
                                    &machine->device_memory->mr);
    }

    virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem);

    create_gic(vms, sysmem);

    virt_cpu_post_init(vms, sysmem);

    fdt_add_pmu_nodes(vms);

    create_uart(vms, VIRT_UART, sysmem, serial_hd(0));

    if (vms->secure) {
        create_secure_ram(vms, secure_sysmem, secure_tag_sysmem);
        create_uart(vms, VIRT_SECURE_UART, secure_sysmem, serial_hd(1));
    }

    if (tag_sysmem) {
        create_tag_ram(tag_sysmem, vms->memmap[VIRT_MEM].base,
                       machine->ram_size, "mach-virt.tag");
    }

    vms->highmem_ecam &= (!firmware_loaded || aarch64);

    create_rtc(vms);

    create_pcie(vms);

    if (has_ged && aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) {
        vms->acpi_dev = create_acpi_ged(vms);
    } else {
        create_gpio_devices(vms, VIRT_GPIO, sysmem);
    }

    if (vms->secure && !vmc->no_secure_gpio) {
        create_gpio_devices(vms, VIRT_SECURE_GPIO, secure_sysmem);
    }

    /* connect powerdown request */
    vms->powerdown_notifier.notify = virt_powerdown_req;
    qemu_register_powerdown_notifier(&vms->powerdown_notifier);

    /* Create mmio transports, so the user can create virtio backends
     * (which will be automatically plugged in to the transports). If
     * no backend is created the transport will just sit harmlessly idle.
     */
    create_virtio_devices(vms);

    vms->fw_cfg = create_fw_cfg(vms, &address_space_memory);
    rom_set_fw(vms->fw_cfg);

    create_platform_bus(vms);

    if (machine->nvdimms_state->is_enabled) {
        const struct AcpiGenericAddress arm_virt_nvdimm_acpi_dsmio = {
            .space_id = AML_AS_SYSTEM_MEMORY,
            .address = vms->memmap[VIRT_NVDIMM_ACPI].base,
            .bit_width = NVDIMM_ACPI_IO_LEN << 3
        };

        nvdimm_init_acpi_state(machine->nvdimms_state, sysmem,
                               arm_virt_nvdimm_acpi_dsmio,
                               vms->fw_cfg, OBJECT(vms));
    }

    vms->bootinfo.ram_size = machine->ram_size;
    vms->bootinfo.board_id = -1;
    vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base;
    vms->bootinfo.get_dtb = machvirt_dtb;
    vms->bootinfo.skip_dtb_autoload = true;
    vms->bootinfo.firmware_loaded = firmware_loaded;
    vms->bootinfo.psci_conduit = vms->psci_conduit;
    arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo);

    vms->machine_done.notify = virt_machine_done;
    qemu_add_machine_init_done_notifier(&vms->machine_done);
}
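/*
 * Example invocation exercising the optional features wired up above
 * (illustrative only; exact option spellings are documented in
 * docs/system/arm/virt.rst):
 *
 *   qemu-system-aarch64 -M virt,secure=on,virtualization=on,mte=on \
 *       -cpu max -smp 4 -m 4G -nographic
 */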
static bool virt_get_secure(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->secure;
}

static void virt_set_secure(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->secure = value;
}

static bool virt_get_virt(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->virt;
}

static void virt_set_virt(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->virt = value;
}

static bool virt_get_highmem(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem;
}

static void virt_set_highmem(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem = value;
}

static bool virt_get_compact_highmem(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem_compact;
}

static void virt_set_compact_highmem(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem_compact = value;
}

static bool virt_get_highmem_redists(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem_redists;
}

static void virt_set_highmem_redists(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem_redists = value;
}

static bool virt_get_highmem_ecam(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem_ecam;
}

static void virt_set_highmem_ecam(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem_ecam = value;
}

static bool virt_get_highmem_mmio(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem_mmio;
}

static void virt_set_highmem_mmio(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem_mmio = value;
}

static bool virt_get_its(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->its;
}

static void virt_set_its(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->its = value;
}

static bool virt_get_dtb_randomness(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->dtb_randomness;
}

static void virt_set_dtb_randomness(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->dtb_randomness = value;
}

static char *virt_get_oem_id(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return g_strdup(vms->oem_id);
}

static void virt_set_oem_id(Object *obj, const char *value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 6) {
        error_setg(errp,
                   "User specified oem-id value is bigger than 6 bytes in size");
        return;
    }

    strncpy(vms->oem_id, value, 6);
}

static char *virt_get_oem_table_id(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return g_strdup(vms->oem_table_id);
}

static void virt_set_oem_table_id(Object *obj, const char *value,
                                  Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 8) {
        error_setg(errp,
                   "User specified oem-table-id value is bigger than 8 bytes in size");
        return;
    }
    strncpy(vms->oem_table_id, value, 8);
}

bool virt_is_acpi_enabled(VirtMachineState *vms)
{
    if (vms->acpi == ON_OFF_AUTO_OFF) {
        return false;
    }
    return true;
}

static void virt_get_acpi(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    OnOffAuto acpi = vms->acpi;

    visit_type_OnOffAuto(v, name, &acpi, errp);
}

static void virt_set_acpi(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &vms->acpi, errp);
}

static bool virt_get_ras(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->ras;
}

static void virt_set_ras(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->ras = value;
}

static bool virt_get_mte(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->mte;
}

static void virt_set_mte(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->mte = value;
}

static char *virt_get_gic_version(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    const char *val;

    switch (vms->gic_version) {
    case VIRT_GIC_VERSION_4:
        val = "4";
        break;
    case VIRT_GIC_VERSION_3:
        val = "3";
        break;
    default:
        val = "2";
        break;
    }
    return g_strdup(val);
}

static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    if (!strcmp(value, "4")) {
        vms->gic_version = VIRT_GIC_VERSION_4;
    } else if (!strcmp(value, "3")) {
        vms->gic_version = VIRT_GIC_VERSION_3;
    } else if (!strcmp(value, "2")) {
        vms->gic_version = VIRT_GIC_VERSION_2;
    } else if (!strcmp(value, "host")) {
        vms->gic_version = VIRT_GIC_VERSION_HOST; /* Will probe later */
    } else if (!strcmp(value, "max")) {
        vms->gic_version = VIRT_GIC_VERSION_MAX; /* Will probe later */
    } else {
        error_setg(errp, "Invalid gic-version value");
        error_append_hint(errp, "Valid values are 3, 2, host, max.\n");
    }
}

static char *virt_get_iommu(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    switch (vms->iommu) {
    case VIRT_IOMMU_NONE:
        return g_strdup("none");
    case VIRT_IOMMU_SMMUV3:
        return g_strdup("smmuv3");
    default:
        g_assert_not_reached();
    }
}

static void virt_set_iommu(Object *obj, const char *value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    if (!strcmp(value, "smmuv3")) {
        vms->iommu = VIRT_IOMMU_SMMUV3;
    } else if (!strcmp(value, "none")) {
        vms->iommu = VIRT_IOMMU_NONE;
    } else {
        error_setg(errp, "Invalid iommu value");
        error_append_hint(errp, "Valid values are none, smmuv3.\n");
    }
}

static bool virt_get_default_bus_bypass_iommu(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->default_bus_bypass_iommu;
}

static void virt_set_default_bus_bypass_iommu(Object *obj, bool value,
                                              Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->default_bus_bypass_iommu = value;
}
static CpuInstanceProperties
virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);

    assert(cpu_index < possible_cpus->len);
    return possible_cpus->cpus[cpu_index].props;
}

static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    int64_t socket_id = ms->possible_cpus->cpus[idx].props.socket_id;

    return socket_id % ms->numa_state->num_nodes;
}
static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
{
    int n;
    unsigned int max_cpus = ms->smp.max_cpus;
    VirtMachineState *vms = VIRT_MACHINE(ms);
    MachineClass *mc = MACHINE_GET_CLASS(vms);

    if (ms->possible_cpus) {
        assert(ms->possible_cpus->len == max_cpus);
        return ms->possible_cpus;
    }

    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                  sizeof(CPUArchId) * max_cpus);
    ms->possible_cpus->len = max_cpus;
    for (n = 0; n < ms->possible_cpus->len; n++) {
        ms->possible_cpus->cpus[n].type = ms->cpu_type;
        ms->possible_cpus->cpus[n].arch_id =
            virt_cpu_mp_affinity(vms, n);

        assert(!mc->smp_props.dies_supported);
        ms->possible_cpus->cpus[n].props.has_socket_id = true;
        ms->possible_cpus->cpus[n].props.socket_id =
            n / (ms->smp.clusters * ms->smp.cores * ms->smp.threads);
        ms->possible_cpus->cpus[n].props.has_cluster_id = true;
        ms->possible_cpus->cpus[n].props.cluster_id =
            (n / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters;
        ms->possible_cpus->cpus[n].props.has_core_id = true;
        ms->possible_cpus->cpus[n].props.core_id =
            (n / ms->smp.threads) % ms->smp.cores;
        ms->possible_cpus->cpus[n].props.has_thread_id = true;
        ms->possible_cpus->cpus[n].props.thread_id =
            n % ms->smp.threads;
    }
    return ms->possible_cpus;
}
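/*
 * Topology illustration: with -smp 8,sockets=2,clusters=2,cores=2,threads=1
 * the linear index n = 5 decomposes as socket_id = 5 / (2 * 2 * 1) = 1,
 * cluster_id = (5 / (2 * 1)) % 2 = 0, core_id = (5 / 1) % 2 = 1 and
 * thread_id = 5 % 1 = 0, matching the formulas above.
 */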
static void virt_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                 Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
    const MachineState *ms = MACHINE(hotplug_dev);
    const bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);

    if (!vms->acpi_dev) {
        error_setg(errp,
                   "memory hotplug is not enabled: missing acpi-ged device");
        return;
    }

    if (vms->mte) {
        error_setg(errp, "memory hotplug is not enabled: MTE is enabled");
        return;
    }

    if (is_nvdimm && !ms->nvdimms_state->is_enabled) {
        error_setg(errp, "nvdimm is not enabled: add 'nvdimm=on' to '-M'");
        return;
    }

    pc_dimm_pre_plug(PC_DIMM(dev), MACHINE(hotplug_dev), NULL, errp);
}

static void virt_memory_plug(HotplugHandler *hotplug_dev,
                             DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
    MachineState *ms = MACHINE(hotplug_dev);
    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);

    pc_dimm_plug(PC_DIMM(dev), MACHINE(vms));

    if (is_nvdimm) {
        nvdimm_plug(ms->nvdimms_state);
    }

    hotplug_handler_plug(HOTPLUG_HANDLER(vms->acpi_dev),
                         dev, &error_abort);
}
static void virt_virtio_md_pci_pre_plug(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
    Error *local_err = NULL;

    if (!hotplug_dev2 && dev->hotplugged) {
        /*
         * Without a bus hotplug handler, we cannot control the plug/unplug
         * order. We should never reach this point when hotplugging on ARM.
         * However, it's nice to add a safety net, similar to what we have
         * on x86.
         */
        error_setg(errp, "hotplug of virtio based memory devices not supported"
                   " on this bus.");
        return;
    }
    /*
     * First, see if we can plug this memory device at all. If that
     * succeeds, branch off to the actual hotplug handler.
     */
    memory_device_pre_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev), NULL,
                           &local_err);
    if (!local_err && hotplug_dev2) {
        hotplug_handler_pre_plug(hotplug_dev2, dev, &local_err);
    }
    error_propagate(errp, local_err);
}

static void virt_virtio_md_pci_plug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
    Error *local_err = NULL;

    /*
     * Plug the memory device first and then branch off to the actual
     * hotplug handler. If that one fails, we can easily undo the memory
     * device plugging.
     */
    memory_device_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
    if (hotplug_dev2) {
        hotplug_handler_plug(hotplug_dev2, dev, &local_err);
        if (local_err) {
            memory_device_unplug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
        }
    }
    error_propagate(errp, local_err);
}

static void virt_virtio_md_pci_unplug_request(HotplugHandler *hotplug_dev,
                                              DeviceState *dev, Error **errp)
{
    /* We don't support hot unplug of virtio based memory devices */
    error_setg(errp, "virtio based memory devices cannot be unplugged.");
}
static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
                                            DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        virt_memory_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
        virt_virtio_md_pci_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
        hwaddr db_start = 0, db_end = 0;
        char *resv_prop_str;

        if (vms->iommu != VIRT_IOMMU_NONE) {
            error_setg(errp, "virt machine does not support multiple IOMMUs");
            return;
        }

        switch (vms->msi_controller) {
        case VIRT_MSI_CTRL_NONE:
            return;
        case VIRT_MSI_CTRL_ITS:
            /* GITS_TRANSLATER page */
            db_start = base_memmap[VIRT_GIC_ITS].base + 0x10000;
            db_end = base_memmap[VIRT_GIC_ITS].base +
                     base_memmap[VIRT_GIC_ITS].size - 1;
            break;
        case VIRT_MSI_CTRL_GICV2M:
            /* MSI_SETSPI_NS page */
            db_start = base_memmap[VIRT_GIC_V2M].base;
            db_end = db_start + base_memmap[VIRT_GIC_V2M].size - 1;
            break;
        }
        resv_prop_str = g_strdup_printf("0x%"PRIx64":0x%"PRIx64":%u",
                                        db_start, db_end,
                                        VIRTIO_IOMMU_RESV_MEM_T_MSI);

        object_property_set_uint(OBJECT(dev), "len-reserved-regions", 1, errp);
        object_property_set_str(OBJECT(dev), "reserved-regions[0]",
                                resv_prop_str, errp);
        g_free(resv_prop_str);
    }
}
static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);

    if (vms->platform_bus_dev) {
        MachineClass *mc = MACHINE_GET_CLASS(vms);

        if (device_is_dynamic_sysbus(mc, dev)) {
            platform_bus_link_device(PLATFORM_BUS_DEVICE(vms->platform_bus_dev),
                                     SYS_BUS_DEVICE(dev));
        }
    }
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        virt_memory_plug(hotplug_dev, dev, errp);
    }
    if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
        virt_virtio_md_pci_plug(hotplug_dev, dev, errp);
    }
    if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
        PCIDevice *pdev = PCI_DEVICE(dev);

        vms->iommu = VIRT_IOMMU_VIRTIO;
        vms->virtio_iommu_bdf = pci_get_bdf(pdev);
        create_virtio_iommu_dt_bindings(vms);
    }
}
static void virt_dimm_unplug_request(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);

    if (!vms->acpi_dev) {
        error_setg(errp,
                   "memory hotplug is not enabled: missing acpi-ged device");
        return;
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
        error_setg(errp, "nvdimm device hot unplug is not supported yet.");
        return;
    }

    hotplug_handler_unplug_request(HOTPLUG_HANDLER(vms->acpi_dev), dev,
                                   errp);
}

static void virt_dimm_unplug(HotplugHandler *hotplug_dev,
                             DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
    Error *local_err = NULL;

    hotplug_handler_unplug(HOTPLUG_HANDLER(vms->acpi_dev), dev, &local_err);
    if (local_err) {
        goto out;
    }

    pc_dimm_unplug(PC_DIMM(dev), MACHINE(vms));
    qdev_unrealize(dev);

out:
    error_propagate(errp, local_err);
}
static void virt_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev,
                                                  DeviceState *dev,
                                                  Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        virt_dimm_unplug_request(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
        virt_virtio_md_pci_unplug_request(hotplug_dev, dev, errp);
    } else {
        error_setg(errp, "device unplug request for unsupported device"
                   " type: %s", object_get_typename(OBJECT(dev)));
    }
}

static void virt_machine_device_unplug_cb(HotplugHandler *hotplug_dev,
                                          DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        virt_dimm_unplug(hotplug_dev, dev, errp);
    } else {
        error_setg(errp, "virt: device unplug for unsupported device"
                   " type: %s", object_get_typename(OBJECT(dev)));
    }
}
static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
                                                        DeviceState *dev)
{
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (device_is_dynamic_sysbus(mc, dev) ||
        object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) ||
        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
        return HOTPLUG_HANDLER(machine);
    }
    return NULL;
}
/*
 * for arm64 kvm_type [7-0] encodes the requested number of bits
 * in the IPA address space
 */
static int virt_kvm_type(MachineState *ms, const char *type_str)
{
    VirtMachineState *vms = VIRT_MACHINE(ms);
    int max_vm_pa_size, requested_pa_size;
    bool fixed_ipa;

    max_vm_pa_size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa);

    /* we freeze the memory map to compute the highest gpa */
    virt_set_memmap(vms, max_vm_pa_size);

    requested_pa_size = 64 - clz64(vms->highest_gpa);

    /*
     * KVM requires the IPA size to be at least 32 bits.
     */
    if (requested_pa_size < 32) {
        requested_pa_size = 32;
    }

    if (requested_pa_size > max_vm_pa_size) {
        error_report("-m and ,maxmem option values "
                     "require an IPA range (%d bits) larger than "
                     "the one supported by the host (%d bits)",
                     requested_pa_size, max_vm_pa_size);
        exit(1);
    }
    /*
     * We return the requested PA log size, unless KVM only supports
     * the implicit legacy 40b IPA setting, in which case the kvm_type
     * must be 0.
     */
    return fixed_ipa ? 0 : requested_pa_size;
}
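/*
 * Example: a guest whose highest_gpa is 0x8fffffffff (just under 40 bits)
 * yields requested_pa_size = 40, so the machine asks KVM for a 40-bit IPA
 * space; hosts that only support the fixed legacy 40-bit setting get a
 * kvm_type of 0 instead, as noted above.
 */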
static void virt_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);

    mc->init = machvirt_init;
    /* Start with max_cpus set to 512, which is the maximum supported by KVM.
     * The value may be reduced later when we have more information about the
     * configuration of the particular instance.
     */
    mc->max_cpus = 512;
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_CALXEDA_XGMAC);
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_AMD_XGBE);
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_PLATFORM);
#ifdef CONFIG_TPM
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
#endif
    mc->block_default_type = IF_VIRTIO;
    mc->no_cdrom = 1;
    mc->pci_allow_0_address = true;
    /* We know we will never create a pre-ARMv7 CPU which needs 1K pages */
    mc->minimum_page_bits = 12;
    mc->possible_cpu_arch_ids = virt_possible_cpu_arch_ids;
    mc->cpu_index_to_instance_props = virt_cpu_index_to_props;
#ifdef CONFIG_TCG
    mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a15");
#else
    mc->default_cpu_type = ARM_CPU_TYPE_NAME("max");
#endif
    mc->get_default_cpu_node_id = virt_get_default_cpu_node_id;
    mc->kvm_type = virt_kvm_type;
    assert(!mc->get_hotplug_handler);
    mc->get_hotplug_handler = virt_machine_get_hotplug_handler;
    hc->pre_plug = virt_machine_device_pre_plug_cb;
    hc->plug = virt_machine_device_plug_cb;
    hc->unplug_request = virt_machine_device_unplug_request_cb;
    hc->unplug = virt_machine_device_unplug_cb;
    mc->nvdimm_supported = true;
    mc->smp_props.clusters_supported = true;
    mc->auto_enable_numa_with_memhp = true;
    mc->auto_enable_numa_with_memdev = true;
    mc->default_ram_id = "mach-virt.ram";

    object_class_property_add(oc, "acpi", "OnOffAuto",
                              virt_get_acpi, virt_set_acpi,
                              NULL, NULL);
    object_class_property_set_description(oc, "acpi",
                                          "Enable ACPI");
    object_class_property_add_bool(oc, "secure", virt_get_secure,
                                   virt_set_secure);
    object_class_property_set_description(oc, "secure",
                                          "Set on/off to enable/disable the ARM "
                                          "Security Extensions (TrustZone)");

    object_class_property_add_bool(oc, "virtualization", virt_get_virt,
                                   virt_set_virt);
    object_class_property_set_description(oc, "virtualization",
                                          "Set on/off to enable/disable emulating a "
                                          "guest CPU which implements the ARM "
                                          "Virtualization Extensions");

    object_class_property_add_bool(oc, "highmem", virt_get_highmem,
                                   virt_set_highmem);
    object_class_property_set_description(oc, "highmem",
                                          "Set on/off to enable/disable using "
                                          "physical address space above 32 bits");

    object_class_property_add_bool(oc, "compact-highmem",
                                   virt_get_compact_highmem,
                                   virt_set_compact_highmem);
    object_class_property_set_description(oc, "compact-highmem",
                                          "Set on/off to enable/disable compact "
                                          "layout for high memory regions");

    object_class_property_add_bool(oc, "highmem-redists",
                                   virt_get_highmem_redists,
                                   virt_set_highmem_redists);
    object_class_property_set_description(oc, "highmem-redists",
                                          "Set on/off to enable/disable high "
                                          "memory region for GICv3 or GICv4 "
                                          "redistributor");

    object_class_property_add_bool(oc, "highmem-ecam",
                                   virt_get_highmem_ecam,
                                   virt_set_highmem_ecam);
    object_class_property_set_description(oc, "highmem-ecam",
                                          "Set on/off to enable/disable high "
                                          "memory region for PCI ECAM");

    object_class_property_add_bool(oc, "highmem-mmio",
                                   virt_get_highmem_mmio,
                                   virt_set_highmem_mmio);
    object_class_property_set_description(oc, "highmem-mmio",
                                          "Set on/off to enable/disable high "
                                          "memory region for PCI MMIO");

    object_class_property_add_str(oc, "gic-version", virt_get_gic_version,
                                  virt_set_gic_version);
    object_class_property_set_description(oc, "gic-version",
                                          "Set GIC version. "
                                          "Valid values are 2, 3, 4, host and max");

    object_class_property_add_str(oc, "iommu", virt_get_iommu, virt_set_iommu);
    object_class_property_set_description(oc, "iommu",
                                          "Set the IOMMU type. "
                                          "Valid values are none and smmuv3");

    object_class_property_add_bool(oc, "default-bus-bypass-iommu",
                                   virt_get_default_bus_bypass_iommu,
                                   virt_set_default_bus_bypass_iommu);
    object_class_property_set_description(oc, "default-bus-bypass-iommu",
                                          "Set on/off to enable/disable "
                                          "bypass_iommu for default root bus");

    object_class_property_add_bool(oc, "ras", virt_get_ras,
                                   virt_set_ras);
    object_class_property_set_description(oc, "ras",
                                          "Set on/off to enable/disable reporting host memory errors "
                                          "to a KVM guest using ACPI and guest external abort exceptions");

    object_class_property_add_bool(oc, "mte", virt_get_mte, virt_set_mte);
    object_class_property_set_description(oc, "mte",
                                          "Set on/off to enable/disable emulating a "
                                          "guest CPU which implements the ARM "
                                          "Memory Tagging Extension");

    object_class_property_add_bool(oc, "its", virt_get_its,
                                   virt_set_its);
    object_class_property_set_description(oc, "its",
                                          "Set on/off to enable/disable "
                                          "ITS instantiation");

    object_class_property_add_bool(oc, "dtb-randomness",
                                   virt_get_dtb_randomness,
                                   virt_set_dtb_randomness);
    object_class_property_set_description(oc, "dtb-randomness",
                                          "Set off to disable passing random or "
                                          "non-deterministic dtb nodes to guest");

    object_class_property_add_bool(oc, "dtb-kaslr-seed",
                                   virt_get_dtb_randomness,
                                   virt_set_dtb_randomness);
    object_class_property_set_description(oc, "dtb-kaslr-seed",
                                          "Deprecated synonym of dtb-randomness");

    object_class_property_add_str(oc, "x-oem-id",
                                  virt_get_oem_id,
                                  virt_set_oem_id);
    object_class_property_set_description(oc, "x-oem-id",
                                          "Override the default value of field OEMID "
                                          "in ACPI table header."
                                          "The string may be up to 6 bytes in size");

    object_class_property_add_str(oc, "x-oem-table-id",
                                  virt_get_oem_table_id,
                                  virt_set_oem_table_id);
    object_class_property_set_description(oc, "x-oem-table-id",
                                          "Override the default value of field OEM Table ID "
                                          "in ACPI table header."
                                          "The string may be up to 8 bytes in size");
}
static void virt_instance_init(Object *obj)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);

    /* EL3 is disabled by default on virt: this makes us consistent
     * between KVM and TCG for this board, and it also allows us to
     * boot UEFI blobs which assume no TrustZone support.
     */
    vms->secure = false;

    /* EL2 is also disabled by default, for similar reasons */
    vms->virt = false;

    /* High memory is enabled by default */
    vms->highmem = true;
    vms->highmem_compact = !vmc->no_highmem_compact;
    vms->gic_version = VIRT_GIC_VERSION_NOSEL;

    vms->highmem_ecam = !vmc->no_highmem_ecam;
    vms->highmem_mmio = true;
    vms->highmem_redists = true;

    if (vmc->no_its) {
        vms->its = false;
    } else {
        /* Default allows ITS instantiation */
        vms->its = true;

        if (vmc->no_tcg_its) {
            vms->tcg_its = false;
        } else {
            vms->tcg_its = true;
        }
    }

    /* Default disallows iommu instantiation */
    vms->iommu = VIRT_IOMMU_NONE;

    /* The default root bus is attached to iommu by default */
    vms->default_bus_bypass_iommu = false;

    /* Default disallows RAS instantiation */
    vms->ras = false;

    /* MTE is disabled by default. */
    vms->mte = false;

    /* Supply kaslr-seed and rng-seed by default */
    vms->dtb_randomness = true;

    vms->irqmap = a15irqmap;

    virt_flash_create(vms);

    vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
    vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
}
static const TypeInfo virt_machine_info = {
    .name          = TYPE_VIRT_MACHINE,
    .parent        = TYPE_MACHINE,
    .abstract      = true,
    .instance_size = sizeof(VirtMachineState),
    .class_size    = sizeof(VirtMachineClass),
    .class_init    = virt_machine_class_init,
    .instance_init = virt_instance_init,
    .interfaces = (InterfaceInfo[]) {
         { TYPE_HOTPLUG_HANDLER },
         { }
    },
};

static void machvirt_machine_init(void)
{
    type_register_static(&virt_machine_info);
}
type_init(machvirt_machine_init);
static void virt_machine_8_0_options(MachineClass *mc)
{
}
DEFINE_VIRT_MACHINE_AS_LATEST(8, 0)

static void virt_machine_7_2_options(MachineClass *mc)
{
    virt_machine_8_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
}
DEFINE_VIRT_MACHINE(7, 2)

static void virt_machine_7_1_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_7_2_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len);
    /* Compact layout for high memory regions was introduced with 7.2 */
    vmc->no_highmem_compact = true;
}
DEFINE_VIRT_MACHINE(7, 1)

static void virt_machine_7_0_options(MachineClass *mc)
{
    virt_machine_7_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len);
}
DEFINE_VIRT_MACHINE(7, 0)

static void virt_machine_6_2_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_7_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
    vmc->no_tcg_lpa2 = true;
}
DEFINE_VIRT_MACHINE(6, 2)

static void virt_machine_6_1_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_6_2_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
    mc->smp_props.prefer_sockets = true;
    vmc->no_cpu_topology = true;

    /* qemu ITS was introduced with 6.2 */
    vmc->no_tcg_its = true;
}
DEFINE_VIRT_MACHINE(6, 1)

static void virt_machine_6_0_options(MachineClass *mc)
{
    virt_machine_6_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
}
DEFINE_VIRT_MACHINE(6, 0)

static void virt_machine_5_2_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_6_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len);
    vmc->no_secure_gpio = true;
}
DEFINE_VIRT_MACHINE(5, 2)

static void virt_machine_5_1_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_5_2_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len);
    vmc->no_kvm_steal_time = true;
}
DEFINE_VIRT_MACHINE(5, 1)

static void virt_machine_5_0_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_5_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
    mc->numa_mem_supported = true;
    vmc->acpi_expose_flash = true;
    mc->auto_enable_numa_with_memdev = false;
}
DEFINE_VIRT_MACHINE(5, 0)

static void virt_machine_4_2_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_5_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
    vmc->kvm_no_adjvtime = true;
}
DEFINE_VIRT_MACHINE(4, 2)

static void virt_machine_4_1_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_4_2_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
    vmc->no_ged = true;
    mc->auto_enable_numa_with_memhp = false;
}
DEFINE_VIRT_MACHINE(4, 1)

static void virt_machine_4_0_options(MachineClass *mc)
{
    virt_machine_4_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
}
DEFINE_VIRT_MACHINE(4, 0)

static void virt_machine_3_1_options(MachineClass *mc)
{
    virt_machine_4_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
}
DEFINE_VIRT_MACHINE(3, 1)

static void virt_machine_3_0_options(MachineClass *mc)
{
    virt_machine_3_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
}
DEFINE_VIRT_MACHINE(3, 0)

static void virt_machine_2_12_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_3_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
    vmc->no_highmem_ecam = true;
    mc->max_cpus = 255;
}
DEFINE_VIRT_MACHINE(2, 12)

static void virt_machine_2_11_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_2_12_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
    vmc->smbios_old_sys_ver = true;
}
DEFINE_VIRT_MACHINE(2, 11)

static void virt_machine_2_10_options(MachineClass *mc)
{
    virt_machine_2_11_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
    /* before 2.11 we never faulted accesses to bad addresses */
    mc->ignore_memory_transaction_failures = true;
}
DEFINE_VIRT_MACHINE(2, 10)

static void virt_machine_2_9_options(MachineClass *mc)
{
    virt_machine_2_10_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
}
DEFINE_VIRT_MACHINE(2, 9)

static void virt_machine_2_8_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_2_9_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
    /* For 2.8 and earlier we falsely claimed in the DT that
     * our timers were edge-triggered, not level-triggered.
     */
    vmc->claim_edge_triggered_timers = true;
}
DEFINE_VIRT_MACHINE(2, 8)
static void virt_machine_2_7_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_2_8_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
    /* ITS was introduced with 2.8 */
    vmc->no_its = true;
    /* Stick with 1K pages for migration compatibility */
    mc->minimum_page_bits = 0;
}
DEFINE_VIRT_MACHINE(2, 7)

static void virt_machine_2_6_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_2_7_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
    vmc->disallow_affinity_adjustment = true;
    /* Disable PMU for 2.6 as PMU support was first introduced in 2.7 */
    vmc->no_pmu = true;
}
DEFINE_VIRT_MACHINE(2, 6)
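/*
 * Note on the versioned machine types above: each virt_machine_X_Y_options()
 * first calls the next newer version's options and then layers its own
 * compat properties and class flags on top, so e.g. "-M virt-6.2" picks up
 * hw_compat_6_2 plus everything registered by the 7.x and 8.0 entries that
 * precede it in the chain.
 */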