/* Support for generating ACPI tables and passing them to Guests
 *
 * ARM virt ACPI generation
 *
 * Copyright (C) 2008-2010 Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Author: Shannon Zhao <zhaoshenglong@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/bitmap.h"
#include "trace.h"
#include "hw/core/cpu.h"
#include "target/arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
#include "hw/acpi/pci.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/arm/virt.h"
#include "sysemu/numa.h"
#include "sysemu/reset.h"
#include "migration/vmstate.h"

#define ARM_SPI_BASE 32
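
/*
 * DSDT helpers: each acpi_dsdt_add_*() function below appends one device
 * (or a group of devices) to the \_SB scope, mirroring what the device
 * tree exposes to guests that boot without ACPI.
 */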
static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
{
    uint16_t i;

    for (i = 0; i < smp_cpus; i++) {
        Aml *dev = aml_device("C%.03X", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(scope, dev);
    }
}
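
/* Describe the PL011 UART as COM0 (_HID ARMH0011) with its MMIO window and SPI. */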
static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
                                           uint32_t uart_irq)
{
    Aml *dev = aml_device("COM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, &uart_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    aml_append(scope, dev);
}
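
/* Expose the fw_cfg MMIO interface to the guest as a QEMU0002 device. */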
static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
    Aml *dev = aml_device("FWCF");
    aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
                                       fw_cfg_memmap->size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}
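
/*
 * The flash area is split evenly into two LNRO0015 devices (FLS0/FLS1),
 * matching the two parallel flash banks of the virt board.
 */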
static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
    Aml *dev, *crs;
    hwaddr base = flash_memmap->base;
    hwaddr size = flash_memmap->size / 2;

    dev = aml_device("FLS0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    dev = aml_device("FLS1");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(1)));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}
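
/*
 * Add one LNRO0005 device per virtio-mmio transport; each transport gets
 * its own MMIO window and interrupt, allocated contiguously starting from
 * the given base address and mmio_irq.
 */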
static void acpi_dsdt_add_virtio(Aml *scope,
                                 const MemMapEntry *virtio_mmio_memmap,
                                 uint32_t mmio_irq, int num)
{
    hwaddr base = virtio_mmio_memmap->base;
    hwaddr size = virtio_mmio_memmap->size;
    int i;

    for (i = 0; i < num; i++) {
        uint32_t irq = mmio_irq + i;
        Aml *dev = aml_device("VR%02u", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

        Aml *crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irq, 1));
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
        base += size;
    }
}
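
/*
 * Describe the PCIe host bridge (PNP0A08/PNP0A03): interrupt routing via
 * GSI link devices, the _CRS windows (bus numbers, 32-bit MMIO, I/O and
 * optionally the high MMIO range), plus the _OSC/_DSM handshakes and a
 * PNP0C02 motherboard resource that reserves the ECAM region.
 */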
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
                              uint32_t irq, bool use_highmem, bool highmem_ecam)
{
    int ecam_id = VIRT_ECAM_ID(highmem_ecam);
    Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf;
    int i, slot_no;
    hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_pio = memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam = memmap[ecam_id].base;
    hwaddr size_ecam = memmap[ecam_id].size;
    int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;

    Aml *dev = aml_device("%s", "PCI0");
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08")));
    aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03")));
    aml_append(dev, aml_name_decl("_SEG", aml_int(0)));
    aml_append(dev, aml_name_decl("_BBN", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_string("PCI0")));
    aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device")));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    /* Declare the PCI Routing Table. */
    Aml *rt_pkg = aml_varpackage(PCI_SLOT_MAX * PCI_NUM_PINS);
    for (slot_no = 0; slot_no < PCI_SLOT_MAX; slot_no++) {
        for (i = 0; i < PCI_NUM_PINS; i++) {
            int gsi = (i + slot_no) % PCI_NUM_PINS;
            Aml *pkg = aml_package(4);
            aml_append(pkg, aml_int((slot_no << 16) | 0xFFFF));
            aml_append(pkg, aml_int(i));
            aml_append(pkg, aml_name("GSI%d", gsi));
            aml_append(pkg, aml_int(0));
            aml_append(rt_pkg, pkg);
        }
    }
    aml_append(dev, aml_name_decl("_PRT", rt_pkg));

    /* Create GSI link device */
    for (i = 0; i < PCI_NUM_PINS; i++) {
        uint32_t irqs = irq + i;
        Aml *dev_gsi = aml_device("GSI%d", i);
        aml_append(dev_gsi, aml_name_decl("_HID", aml_string("PNP0C0F")));
        aml_append(dev_gsi, aml_name_decl("_UID", aml_int(i)));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_PRS", crs));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_CRS", crs));
        method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
        aml_append(dev_gsi, method);
        aml_append(dev, dev_gsi);
    }

    method = aml_method("_CBA", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_int(base_ecam)));
    aml_append(dev, method);

    method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
    Aml *rbuf = aml_resource_template();
    aml_append(rbuf,
        aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                            0x0000, 0x0000, nr_pcie_buses - 1, 0x0000,
                            nr_pcie_buses));
    aml_append(rbuf,
        aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_mmio,
                         base_mmio + size_mmio - 1, 0x0000, size_mmio));
    aml_append(rbuf,
        aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                     AML_ENTIRE_RANGE, 0x0000, 0x0000, size_pio - 1, base_pio,
                     size_pio));

    if (use_highmem) {
        hwaddr base_mmio_high = memmap[VIRT_HIGH_PCIE_MMIO].base;
        hwaddr size_mmio_high = memmap[VIRT_HIGH_PCIE_MMIO].size;

        aml_append(rbuf,
            aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                             AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
                             base_mmio_high,
                             base_mmio_high + size_mmio_high - 1, 0x0000,
                             size_mmio_high));
    }

    aml_append(method, aml_return(rbuf));
    aml_append(dev, method);

    /* Declare an _OSC (OS Control Handoff) method */
    aml_append(dev, aml_name_decl("SUPP", aml_int(0)));
    aml_append(dev, aml_name_decl("CTRL", aml_int(0)));
    method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
    aml_append(method,
        aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));

    /* PCI Firmware Specification 3.0
     * 4.5.1. _OSC Interface for PCI Host Bridge Devices
     * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is
     * identified by the Universal Unique IDentifier (UUID)
     * 33DB4D5B-1FF7-401C-9657-7441C03DD766
     */
    UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
    aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP")));
    aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL")));

    /*
     * Allow OS control for all 5 features:
     * PCIeHotplug SHPCHotplug PME AER PCIeCapability.
     */
    aml_append(ifctx, aml_and(aml_name("CTRL"), aml_int(0x1F),
                              aml_name("CTRL")));

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1))));
    aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x08),
                              aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL"))));
    aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x10),
                              aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3")));
    aml_append(ifctx, aml_return(aml_arg(3)));
    aml_append(method, ifctx);

    elsectx = aml_else();
    aml_append(elsectx, aml_or(aml_name("CDW1"), aml_int(4),
                               aml_name("CDW1")));
    aml_append(elsectx, aml_return(aml_arg(3)));
    aml_append(method, elsectx);
    aml_append(dev, method);

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);

    /* PCI Firmware Specification 3.0
     * 4.6.1. _DSM for PCI Express Slot Information
     * The UUID in _DSM in this context is
     * {E5C937D0-3553-4D7A-9117-EA4D19C3434D}
     */
    UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0)));
    uint8_t byte_list[1] = {1};
    buf = aml_buffer(1, byte_list);
    aml_append(ifctx1, aml_return(buf));
    aml_append(ifctx, ifctx1);
    aml_append(method, ifctx);

    byte_list[0] = 0;
    buf = aml_buffer(1, byte_list);
    aml_append(method, aml_return(buf));
    aml_append(dev, method);

    Aml *dev_res0 = aml_device("%s", "RES0");
    aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
    crs = aml_resource_template();
    aml_append(crs,
        aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_ecam,
                         base_ecam + size_ecam - 1, 0x0000, size_ecam));
    aml_append(dev_res0, aml_name_decl("_CRS", crs));
    aml_append(dev, dev_res0);
    aml_append(scope, dev);
}
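
/*
 * PL061 GPIO controller (ARMH0061); pin 3 is wired up as an ACPI event
 * source so that _E03 can notify the power button device.
 */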
static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
                               uint32_t gpio_irq)
{
    Aml *dev = aml_device("GPO0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0061")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(gpio_memmap->base, gpio_memmap->size,
                                       AML_READ_WRITE));
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_EXCLUSIVE, &gpio_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    Aml *aei = aml_resource_template();
    /* Pin 3 for power button */
    const uint32_t pin_list[1] = {3};
    aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1,
                                 "GPO0", NULL, 0));
    aml_append(dev, aml_name_decl("_AEI", aei));

    /* _E03 is the handler for the power button event on pin 3 */
    Aml *method = aml_method("_E03", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
                                  aml_int(0x80)));
    aml_append(dev, method);
    aml_append(scope, dev);
}
static void acpi_dsdt_add_power_button(Aml *scope)
{
    Aml *dev = aml_device(ACPI_POWER_BUTTON_DEVICE);
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0C0C")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));
    aml_append(scope, dev);
}
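
/*
 * IORT: describes the topology between the PCIe root complex, the optional
 * SMMUv3 and the GIC ITS, so the guest can map device IDs (RIDs) to stream
 * IDs and MSIs.
 */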
static void
build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    int nb_nodes, iort_start = table_data->len;
    AcpiIortIdMapping *idmap;
    AcpiIortItsGroup *its;
    AcpiIortTable *iort;
    AcpiIortSmmu3 *smmu;
    size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
    AcpiIortRC *rc;

    iort = acpi_data_push(table_data, sizeof(*iort));

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        nb_nodes = 3; /* RC, ITS, SMMUv3 */
    } else {
        nb_nodes = 2; /* RC, ITS */
    }

    iort_length = sizeof(*iort);
    iort->node_count = cpu_to_le32(nb_nodes);
    /*
     * Use a copy in case table_data->data moves during acpi_data_push
     * operations.
     */
    iort_node_offset = sizeof(*iort);
    iort->node_offset = cpu_to_le32(iort_node_offset);

    /* ITS group node */
    node_size = sizeof(*its) + sizeof(uint32_t);
    iort_length += node_size;
    its = acpi_data_push(table_data, node_size);

    its->type = ACPI_IORT_NODE_ITS_GROUP;
    its->length = cpu_to_le16(node_size);
    its->its_count = cpu_to_le32(1);
    its->identifiers[0] = 0; /* MADT translation_id */

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE;

        /* SMMUv3 node */
        smmu_offset = iort_node_offset + node_size;
        node_size = sizeof(*smmu) + sizeof(*idmap);
        iort_length += node_size;
        smmu = acpi_data_push(table_data, node_size);

        smmu->type = ACPI_IORT_NODE_SMMU_V3;
        smmu->length = cpu_to_le16(node_size);
        smmu->mapping_count = cpu_to_le32(1);
        smmu->mapping_offset = cpu_to_le32(sizeof(*smmu));
        smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base);
        smmu->flags = cpu_to_le32(ACPI_IORT_SMMU_V3_COHACC_OVERRIDE);
        smmu->event_gsiv = cpu_to_le32(irq);
        smmu->pri_gsiv = cpu_to_le32(irq + 1);
        smmu->gerr_gsiv = cpu_to_le32(irq + 2);
        smmu->sync_gsiv = cpu_to_le32(irq + 3);

        /* Identity RID mapping covering the whole input RID range */
        idmap = &smmu->id_mapping_array[0];
        idmap->input_base = 0;
        idmap->id_count = cpu_to_le32(0xFFFF);
        idmap->output_base = 0;
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /* Root Complex Node */
    node_size = sizeof(*rc) + sizeof(*idmap);
    iort_length += node_size;
    rc = acpi_data_push(table_data, node_size);

    rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
    rc->length = cpu_to_le16(node_size);
    rc->mapping_count = cpu_to_le32(1);
    rc->mapping_offset = cpu_to_le32(sizeof(*rc));

    /* fully coherent device */
    rc->memory_properties.cache_coherency = cpu_to_le32(1);
    rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
    rc->pci_segment_number = 0; /* MCFG pci_segment */

    /* Identity RID mapping covering the whole input RID range */
    idmap = &rc->id_mapping_array[0];
    idmap->input_base = 0;
    idmap->id_count = cpu_to_le32(0xFFFF);
    idmap->output_base = 0;

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        /* output IORT node is the smmuv3 node */
        idmap->output_reference = cpu_to_le32(smmu_offset);
    } else {
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /*
     * Update the pointer address in case table_data->data moves during above
     * acpi_data_push operations.
     */
    iort = (AcpiIortTable *)(table_data->data + iort_start);
    iort->length = cpu_to_le32(iort_length);

    build_header(linker, table_data, (void *)(table_data->data + iort_start),
                 "IORT", table_data->len - iort_start, 0, NULL, NULL);
}
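
/* SPCR: points the OS serial console at the PL011 UART. */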
static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSerialPortConsoleRedirection *spcr;
    const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART];
    int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE;
    int spcr_start = table_data->len;

    spcr = acpi_data_push(table_data, sizeof(*spcr));

    spcr->interface_type = 0x3;    /* ARM PL011 UART */

    spcr->base_address.space_id = AML_SYSTEM_MEMORY;
    spcr->base_address.bit_width = 8;
    spcr->base_address.bit_offset = 0;
    spcr->base_address.access_width = 1;
    spcr->base_address.address = cpu_to_le64(uart_memmap->base);

    spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */
    spcr->gsi = cpu_to_le32(irq);  /* Global System Interrupt */

    spcr->baud = 3;                /* Baud Rate: 3 = 9600 */
    spcr->parity = 0;              /* No Parity */
    spcr->stopbits = 1;            /* 1 Stop bit */
    spcr->flowctrl = (1 << 1);     /* Bit[1] = RTS/CTS hardware flow control */
    spcr->term_type = 0;           /* Terminal Type: 0 = VT100 */

    spcr->pci_device_id = 0xffff;  /* PCI Device ID: not a PCI device */
    spcr->pci_vendor_id = 0xffff;  /* PCI Vendor ID: not a PCI device */

    build_header(linker, table_data, (void *)(table_data->data + spcr_start),
                 "SPCR", table_data->len - spcr_start, 2, NULL, NULL);
}
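
/*
 * SRAT: CPU and memory NUMA affinity, plus a hotpluggable region when
 * device memory is configured.
 */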
static void
build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorGiccAffinity *core;
    AcpiSratMemoryAffinity *numamem;
    int i, srat_start;
    uint64_t mem_base;
    MachineClass *mc = MACHINE_GET_CLASS(vms);
    MachineState *ms = MACHINE(vms);
    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms);

    srat_start = table_data->len;
    srat = acpi_data_push(table_data, sizeof(*srat));
    srat->reserved1 = cpu_to_le32(1);

    for (i = 0; i < cpu_list->len; ++i) {
        core = acpi_data_push(table_data, sizeof(*core));
        core->type = ACPI_SRAT_PROCESSOR_GICC;
        core->length = sizeof(*core);
        core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id);
        core->acpi_processor_uid = cpu_to_le32(i);
        core->flags = cpu_to_le32(1);
    }

    mem_base = vms->memmap[VIRT_MEM].base;
    for (i = 0; i < ms->numa_state->num_nodes; ++i) {
        if (ms->numa_state->nodes[i].node_mem > 0) {
            numamem = acpi_data_push(table_data, sizeof(*numamem));
            build_srat_memory(numamem, mem_base,
                              ms->numa_state->nodes[i].node_mem, i,
                              MEM_AFFINITY_ENABLED);
            mem_base += ms->numa_state->nodes[i].node_mem;
        }
    }

    if (ms->device_memory) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, ms->device_memory->base,
                          memory_region_size(&ms->device_memory->mr),
                          ms->numa_state->num_nodes - 1,
                          MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
    }

    build_header(linker, table_data, (void *)(table_data->data + srat_start),
                 "SRAT", table_data->len - srat_start, 3, NULL, NULL);
}
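
/* GTDT: generic timer interrupt numbers and trigger/polarity flags. */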
static void
build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int gtdt_start = table_data->len;
    AcpiGenericTimerTable *gtdt;
    uint32_t irqflags;

    if (vmc->claim_edge_triggered_timers) {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE;
    } else {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
    }

    gtdt = acpi_data_push(table_data, sizeof *gtdt);
    /* The interrupt values match the device tree ones once 16 is added */
    gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16);
    gtdt->secure_el1_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16);
    gtdt->non_secure_el1_flags = cpu_to_le32(irqflags |
                                             ACPI_GTDT_CAP_ALWAYS_ON);

    gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16);
    gtdt->virtual_timer_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16);
    gtdt->non_secure_el2_flags = cpu_to_le32(irqflags);

    build_header(linker, table_data,
                 (void *)(table_data->data + gtdt_start), "GTDT",
                 table_data->len - gtdt_start, 2, NULL, NULL);
}
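
/*
 * MADT ("APIC" signature): GIC distributor, one GICC per CPU and, depending
 * on the GIC version, redistributors and an ITS node or a GICv2m MSI frame.
 */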
static void
build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int madt_start = table_data->len;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;
    AcpiMultipleApicTable *madt;
    AcpiMadtGenericDistributor *gicd;
    AcpiMadtGenericMsiFrame *gic_msi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);

    gicd = acpi_data_push(table_data, sizeof *gicd);
    gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
    gicd->length = sizeof(*gicd);
    gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
    gicd->version = vms->gic_version;

    for (i = 0; i < vms->smp_cpus; i++) {
        AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                           sizeof(*gicc));
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));

        gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE;
        gicc->length = sizeof(*gicc);
        if (vms->gic_version == 2) {
            gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
            gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base);
            gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base);
        }
        gicc->cpu_interface_number = cpu_to_le32(i);
        gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
        gicc->uid = cpu_to_le32(i);
        gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED);

        if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
            gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
        }
        if (vms->virt) {
            gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ));
        }
    }

    if (vms->gic_version == 3) {
        AcpiMadtGenericTranslator *gic_its;
        int nb_redist_regions = virt_gicv3_redist_region_count(vms);
        AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
                                                            sizeof *gicr);

        gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
        gicr->length = sizeof(*gicr);
        gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
        gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);

        if (nb_redist_regions == 2) {
            gicr = acpi_data_push(table_data, sizeof(*gicr));
            gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
            gicr->length = sizeof(*gicr);
            gicr->base_address =
                cpu_to_le64(memmap[VIRT_HIGH_GIC_REDIST2].base);
            gicr->range_length =
                cpu_to_le32(memmap[VIRT_HIGH_GIC_REDIST2].size);
        }

        if (its_class_name() && !vmc->no_its) {
            gic_its = acpi_data_push(table_data, sizeof *gic_its);
            gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
            gic_its->length = sizeof(*gic_its);
            gic_its->translation_id = 0;
            gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base);
        }
    } else {
        gic_msi = acpi_data_push(table_data, sizeof *gic_msi);
        gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME;
        gic_msi->length = sizeof(*gic_msi);
        gic_msi->gic_msi_frame_id = 0;
        gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base);
        gic_msi->flags = cpu_to_le32(1);
        gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS);
        gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 3, NULL, NULL);
}
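
/*
 * FADT: hardware-reduced ACPI, with the PSCI conduit advertised via the
 * ARM boot-architecture flags.
 */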
static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker,
                            VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
    /* ACPI v5.1 */
    AcpiFadtData fadt = {
        .rev = 5,
        .minor_ver = 1,
        .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
        .xdsdt_tbl_offset = &dsdt_tbl_offset,
    };

    switch (vms->psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        fadt.arm_boot_arch = 0;
        break;
    case QEMU_PSCI_CONDUIT_HVC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT |
                             ACPI_FADT_ARM_PSCI_USE_HVC;
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT;
        break;
    default:
        g_assert_not_reached();
    }

    build_fadt(table_data, linker, &fadt, NULL, NULL);
}
static void
build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    Aml *scope, *dsdt;
    MachineState *ms = MACHINE(vms);
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;

    dsdt = init_aml_allocator();
    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware.
     * While UEFI can use libfdt to disable the RTC device node in the DTB that
     * it passes to the OS, it cannot modify AML. Therefore, we won't generate
     * the RTC ACPI device at all when using UEFI.
     */
    scope = aml_scope("\\_SB");
    acpi_dsdt_add_cpus(scope, vms->smp_cpus);
    acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                       (irqmap[VIRT_UART] + ARM_SPI_BASE));
    acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
    acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
    acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
                    (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
    acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
                      vms->highmem, vms->highmem_ecam);
    if (vms->acpi_dev) {
        build_ged_aml(scope, "\\_SB."GED_DEVICE,
                      HOTPLUG_HANDLER(vms->acpi_dev),
                      irqmap[VIRT_ACPI_GED] + ARM_SPI_BASE, AML_SYSTEM_MEMORY,
                      memmap[VIRT_ACPI_GED].base);
    } else {
        acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
                           (irqmap[VIRT_GPIO] + ARM_SPI_BASE));
    }

    if (vms->acpi_dev) {
        uint32_t event = object_property_get_uint(OBJECT(vms->acpi_dev),
                                                  "ged-event", &error_abort);

        if (event & ACPI_GED_MEM_HOTPLUG_EVT) {
            build_memory_hotplug_aml(scope, ms->ram_slots, "\\_SB", NULL,
                                     AML_SYSTEM_MEMORY,
                                     memmap[VIRT_PCDIMM_ACPI].base);
        }
    }

    acpi_dsdt_add_power_button(scope);

    aml_append(dsdt, scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - dsdt->buf->len),
        "DSDT", dsdt->buf->len, 2, NULL, NULL);
    free_aml_allocator();
}
typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    MemoryRegion *rsdp_mr;
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
} AcpiBuildState;
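
/*
 * Build the full set of tables (DSDT, FADT, MADT, GTDT, MCFG, SPCR and,
 * when configured, SRAT/SLIT/IORT), followed by the XSDT and RSDP that
 * point at them.
 */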
static
void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    GArray *table_offsets;
    unsigned dsdt, xsdt;
    GArray *tables_blob = tables->table_data;
    MachineState *ms = MACHINE(vms);

    table_offsets = g_array_new(false, true /* clear */,
                                        sizeof(uint32_t));

    bios_linker_loader_alloc(tables->linker,
                             ACPI_BUILD_TABLE_FILE, tables_blob,
                             64, false /* high memory */);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, vms);

    /* FADT MADT GTDT MCFG SPCR pointed to by RSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt_rev5(tables_blob, tables->linker, vms, dsdt);

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_gtdt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    {
        AcpiMcfgInfo mcfg = {
           .base = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].base,
           .size = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].size,
        };
        build_mcfg(tables_blob, tables->linker, &mcfg);
    }

    acpi_add_table(table_offsets, tables_blob);
    build_spcr(tables_blob, tables->linker, vms);

    if (ms->numa_state->num_nodes > 0) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, vms);
        if (ms->numa_state->have_numa_distance) {
            acpi_add_table(table_offsets, tables_blob);
            build_slit(tables_blob, tables->linker, ms);
        }
    }

    if (its_class_name() && !vmc->no_its) {
        acpi_add_table(table_offsets, tables_blob);
        build_iort(tables_blob, tables->linker, vms);
    }

    /* XSDT is pointed to by RSDP */
    xsdt = tables_blob->len;
    build_xsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);

    /* RSDP is in FSEG memory, so allocate it separately */
    {
        AcpiRsdpData rsdp_data = {
            .revision = 2,
            .oem_id = ACPI_BUILD_APPNAME6,
            .xsdt_tbl_offset = &xsdt,
            .rsdt_tbl_offset = NULL,
        };
        build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
    }

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}
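
/* Re-copy a (possibly resized) table blob into its guest-visible RAM region. */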
static void acpi_ram_update(MemoryRegion *mr, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed
     * e.g. by migration */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
    memory_region_set_dirty(mr, 0, size);
}
static void virt_acpi_build_update(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = true;

    acpi_build_tables_init(&tables);

    virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables);

    acpi_ram_update(build_state->table_mr, tables.table_data);
    acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
    acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);

    acpi_build_tables_cleanup(&tables, true);
}
static void virt_acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = false;
}
static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};
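
/*
 * Machine init hook: build the tables once, expose them through fw_cfg ROM
 * blobs, and register reset/vmstate handlers so they can be re-patched.
 */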
void virt_acpi_setup(VirtMachineState *vms)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!vms->fw_cfg) {
        trace_virt_acpi_setup();
        return;
    }

    if (!virt_is_acpi_enabled(vms)) {
        trace_virt_acpi_setup();
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    acpi_build_tables_init(&tables);
    virt_acpi_build(vms, &tables);

    /* Now expose it all to Guest */
    build_state->table_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                              build_state, tables.table_data,
                                              ACPI_BUILD_TABLE_FILE,
                                              ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr =
        acpi_add_rom_blob(virt_acpi_build_update, build_state,
                          tables.linker->cmd_blob, "etc/table-loader", 0);

    fw_cfg_add_file(vms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
                    acpi_data_len(tables.tcpalog));

    build_state->rsdp_mr = acpi_add_rom_blob(virt_acpi_build_update,
                                             build_state, tables.rsdp,
                                             ACPI_BUILD_RSDP_FILE, 0);

    qemu_register_reset(virt_acpi_build_reset, build_state);
    virt_acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}