/* Support for generating ACPI tables and passing them to Guests
 *
 * ARM virt ACPI generation
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Author: Shannon Zhao <zhaoshenglong@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/bitmap.h"
#include "trace.h"
#include "target/arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/acpi/aml-build.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/arm/virt.h"
#include "sysemu/numa.h"

#define ARM_SPI_BASE 32
#define ACPI_POWER_BUTTON_DEVICE "PWRB"

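/*
 * DSDT CPU description.  Each CPU is exposed as an ACPI0007 "processor
 * device".  As a rough sketch, the resulting ASL looks roughly like
 * (illustrative only):
 *
 *     Device (C000) {
 *         Name (_HID, "ACPI0007")
 *         Name (_UID, Zero)
 *     }
 */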
static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
{
    uint16_t i;

    for (i = 0; i < smp_cpus; i++) {
        Aml *dev = aml_device("C%.03X", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(scope, dev);
    }
}

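/*
 * The UART is exposed as COM0 with _HID "ARMH0011", the ACPI ID used for the
 * PL011/SBSA-compatible UART; build_spcr() below describes the same UART as
 * the firmware console.
 */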
static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
                                           uint32_t uart_irq)
{
    Aml *dev = aml_device("COM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, &uart_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    /* The _ADR entry is used to link this device to the UART described
     * in the SPCR table, i.e. SPCR.base_address.address == _ADR.
     */
    aml_append(dev, aml_name_decl("_ADR", aml_int(uart_memmap->base)));

    aml_append(scope, dev);
}

static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
    Aml *dev = aml_device("FWCF");
    aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
                                       fw_cfg_memmap->size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
    Aml *dev, *crs;
    hwaddr base = flash_memmap->base;
    hwaddr size = flash_memmap->size / 2;

    dev = aml_device("FLS0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    dev = aml_device("FLS1");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(1)));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

static void acpi_dsdt_add_virtio(Aml *scope,
                                 const MemMapEntry *virtio_mmio_memmap,
                                 uint32_t mmio_irq, int num)
{
    hwaddr base = virtio_mmio_memmap->base;
    hwaddr size = virtio_mmio_memmap->size;
    int i;

    for (i = 0; i < num; i++) {
        uint32_t irq = mmio_irq + i;
        Aml *dev = aml_device("VR%02u", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

        Aml *crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irq, 1));
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
        base += size;
    }
}

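/*
 * PCIe host bridge (PNP0A08/PNP0A03).  The device built below carries:
 *   - a _PRT whose entries are swizzled per bus with
 *     gsi = (pin + bus) % PCI_NUM_PINS,
 *   - one GSI link device (PNP0C0F) per INTx pin, wired to consecutive SPIs,
 *   - _CBA/_CRS describing the ECAM, 32-bit MMIO, PIO and (optionally)
 *     high MMIO windows,
 *   - _OSC and _DSM stubs per the PCI Firmware Specification 3.0.
 */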
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
                              uint32_t irq, bool use_highmem)
{
    Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf;
    int i, bus_no;
    hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_pio = memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam = memmap[VIRT_PCIE_ECAM].base;
    hwaddr size_ecam = memmap[VIRT_PCIE_ECAM].size;
    int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;

    Aml *dev = aml_device("%s", "PCI0");
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08")));
    aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03")));
    aml_append(dev, aml_name_decl("_SEG", aml_int(0)));
    aml_append(dev, aml_name_decl("_BBN", aml_int(0)));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_string("PCI0")));
    aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device")));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    /* Declare the PCI Routing Table. */
    Aml *rt_pkg = aml_package(nr_pcie_buses * PCI_NUM_PINS);
    for (bus_no = 0; bus_no < nr_pcie_buses; bus_no++) {
        for (i = 0; i < PCI_NUM_PINS; i++) {
            int gsi = (i + bus_no) % PCI_NUM_PINS;
            Aml *pkg = aml_package(4);
            aml_append(pkg, aml_int((bus_no << 16) | 0xFFFF));
            aml_append(pkg, aml_int(i));
            aml_append(pkg, aml_name("GSI%d", gsi));
            aml_append(pkg, aml_int(0));
            aml_append(rt_pkg, pkg);
        }
    }
    aml_append(dev, aml_name_decl("_PRT", rt_pkg));

    /* Create GSI link device */
    for (i = 0; i < PCI_NUM_PINS; i++) {
        uint32_t irqs = irq + i;
        Aml *dev_gsi = aml_device("GSI%d", i);
        aml_append(dev_gsi, aml_name_decl("_HID", aml_string("PNP0C0F")));
        aml_append(dev_gsi, aml_name_decl("_UID", aml_int(0)));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_PRS", crs));
        crs = aml_resource_template();
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irqs, 1));
        aml_append(dev_gsi, aml_name_decl("_CRS", crs));
        method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
        aml_append(dev_gsi, method);
        aml_append(dev, dev_gsi);
    }

    method = aml_method("_CBA", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_int(base_ecam)));
    aml_append(dev, method);

    method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
    Aml *rbuf = aml_resource_template();
    aml_append(rbuf,
        aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                            0x0000, 0x0000, nr_pcie_buses - 1, 0x0000,
                            nr_pcie_buses));
    aml_append(rbuf,
        aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_mmio,
                         base_mmio + size_mmio - 1, 0x0000, size_mmio));
    aml_append(rbuf,
        aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                     AML_ENTIRE_RANGE, 0x0000, 0x0000, size_pio - 1, base_pio,
                     size_pio));

    if (use_highmem) {
        hwaddr base_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].base;
        hwaddr size_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].size;

        aml_append(rbuf,
            aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                             AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
                             base_mmio_high,
                             base_mmio_high + size_mmio_high - 1, 0x0000,
                             size_mmio_high));
    }

    aml_append(method, aml_name_decl("RBUF", rbuf));
    aml_append(method, aml_return(rbuf));
    aml_append(dev, method);

    /* Declare an _OSC (OS Control Handoff) method */
    aml_append(dev, aml_name_decl("SUPP", aml_int(0)));
    aml_append(dev, aml_name_decl("CTRL", aml_int(0)));
    method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
    aml_append(method,
        aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));

    /* PCI Firmware Specification 3.0
     * 4.5.1. _OSC Interface for PCI Host Bridge Devices
     * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is
     * identified by the Universal Unique IDentifier (UUID)
     * 33DB4D5B-1FF7-401C-9657-7441C03DD766
     */
    UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
    aml_append(ifctx,
        aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
    aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP")));
    aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL")));
    aml_append(ifctx, aml_store(aml_and(aml_name("CTRL"), aml_int(0x1D), NULL),
                                aml_name("CTRL")));

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x08), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL"))));
    aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x10), NULL),
                                 aml_name("CDW1")));
    aml_append(ifctx, ifctx1);

    aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3")));
    aml_append(ifctx, aml_return(aml_arg(3)));
    aml_append(method, ifctx);

    elsectx = aml_else();
    aml_append(elsectx, aml_store(aml_or(aml_name("CDW1"), aml_int(4), NULL),
                                  aml_name("CDW1")));
    aml_append(elsectx, aml_return(aml_arg(3)));
    aml_append(method, elsectx);
    aml_append(dev, method);

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);

    /* PCI Firmware Specification 3.0
     * 4.6.1. _DSM for PCI Express Slot Information
     * The UUID in _DSM in this context is
     * {E5C937D0-3553-4D7A-9117-EA4D19C3434D}
     */
    UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
    ifctx = aml_if(aml_equal(aml_arg(0), UUID));
    ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0)));
    uint8_t byte_list[1] = {1};
    buf = aml_buffer(1, byte_list);
    aml_append(ifctx1, aml_return(buf));
    aml_append(ifctx, ifctx1);
    aml_append(method, ifctx);

    byte_list[0] = 0;
    buf = aml_buffer(1, byte_list);
    aml_append(method, aml_return(buf));
    aml_append(dev, method);

    Aml *dev_rp0 = aml_device("%s", "RP0");
    aml_append(dev_rp0, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, dev_rp0);

    Aml *dev_res0 = aml_device("%s", "RES0");
    aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base_ecam, size_ecam, AML_READ_WRITE));
    aml_append(dev_res0, aml_name_decl("_CRS", crs));
    aml_append(dev, dev_res0);
    aml_append(scope, dev);
}

static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
                                           uint32_t gpio_irq)
{
    Aml *dev = aml_device("GPO0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0061")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(gpio_memmap->base, gpio_memmap->size,
                                       AML_READ_WRITE));
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_EXCLUSIVE, &gpio_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    Aml *aei = aml_resource_template();
    /* Pin 3 for power button */
    const uint32_t pin_list[1] = {3};
    aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1,
                                 "GPO0", NULL, 0));
    aml_append(dev, aml_name_decl("_AEI", aei));

    /* _E03 is the event handler for GPIO pin 3, i.e. the power button */
    Aml *method = aml_method("_E03", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
                                  aml_int(0x80)));
    aml_append(dev, method);
    aml_append(scope, dev);
}

static void acpi_dsdt_add_power_button(Aml *scope)
{
    Aml *dev = aml_device(ACPI_POWER_BUTTON_DEVICE);
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0C0C")));
    aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));
    aml_append(scope, dev);
}

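/*
 * RSDP.  A revision 2 descriptor pointing at the XSDT; the pointer and
 * checksum fields are left for the guest-side BIOS linker/loader to patch
 * once the table blob has been placed in guest memory.
 */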
static void
build_rsdp(GArray *rsdp_table, BIOSLinker *linker, unsigned xsdt_tbl_offset)
{
    AcpiRsdpDescriptor *rsdp = acpi_data_push(rsdp_table, sizeof *rsdp);
    unsigned xsdt_pa_size = sizeof(rsdp->xsdt_physical_address);
    unsigned xsdt_pa_offset =
        (char *)&rsdp->xsdt_physical_address - rsdp_table->data;

    bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, rsdp_table, 16,
                             true /* fseg memory */);

    memcpy(&rsdp->signature, "RSD PTR ", sizeof(rsdp->signature));
    memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, sizeof(rsdp->oem_id));
    rsdp->length = cpu_to_le32(sizeof(*rsdp));
    rsdp->revision = 0x02;

    /* Address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker,
        ACPI_BUILD_RSDP_FILE, xsdt_pa_offset, xsdt_pa_size,
        ACPI_BUILD_TABLE_FILE, xsdt_tbl_offset);

    /* Checksum to be filled by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
        (char *)rsdp - rsdp_table->data, sizeof *rsdp,
        (char *)&rsdp->checksum - rsdp_table->data);
}

static void
build_iort(GArray *table_data, BIOSLinker *linker)
{
    int iort_start = table_data->len;
    AcpiIortIdMapping *idmap;
    AcpiIortItsGroup *its;
    AcpiIortTable *iort;
    AcpiIortRC *rc;
    size_t node_size, iort_length;

    iort = acpi_data_push(table_data, sizeof(*iort));

    iort_length = sizeof(*iort);
    iort->node_count = cpu_to_le32(2); /* RC and ITS nodes */
    iort->node_offset = cpu_to_le32(sizeof(*iort));

    /* ITS group node */
    node_size = sizeof(*its) + sizeof(uint32_t);
    iort_length += node_size;
    its = acpi_data_push(table_data, node_size);

    its->type = ACPI_IORT_NODE_ITS_GROUP;
    its->length = cpu_to_le16(node_size);
    its->its_count = cpu_to_le32(1);
    its->identifiers[0] = 0; /* MADT translation_id */

    /* Root Complex Node */
    node_size = sizeof(*rc) + sizeof(*idmap);
    iort_length += node_size;
    rc = acpi_data_push(table_data, node_size);

    rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
    rc->length = cpu_to_le16(node_size);
    rc->mapping_count = cpu_to_le32(1);
    rc->mapping_offset = cpu_to_le32(sizeof(*rc));

    /* fully coherent device */
    rc->memory_properties.cache_coherency = cpu_to_le32(1);
    rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
    rc->pci_segment_number = 0; /* MCFG pci_segment */

    /* Identity RID mapping covering the whole input RID range */
    idmap = &rc->id_mapping_array[0];
    idmap->input_base = 0;
    idmap->id_count = cpu_to_le32(0xFFFF);
    idmap->output_base = 0;
    /* output IORT node is the ITS group node (the first node) */
    idmap->output_reference = cpu_to_le32(iort->node_offset);

    iort->length = cpu_to_le32(iort_length);

    build_header(linker, table_data, (void *)(table_data->data + iort_start),
                 "IORT", table_data->len - iort_start, 0, NULL, NULL);
}

static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSerialPortConsoleRedirection *spcr;
    const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART];
    int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE;

    spcr = acpi_data_push(table_data, sizeof(*spcr));

    spcr->interface_type = 0x3;    /* ARM PL011 UART */

    spcr->base_address.space_id = AML_SYSTEM_MEMORY;
    spcr->base_address.bit_width = 8;
    spcr->base_address.bit_offset = 0;
    spcr->base_address.access_width = 1;
    spcr->base_address.address = cpu_to_le64(uart_memmap->base);

    spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */
    spcr->gsi = cpu_to_le32(irq);  /* Global System Interrupt */

    spcr->baud = 3;                /* Baud Rate: 3 = 9600 */
    spcr->parity = 0;              /* No Parity */
    spcr->stopbits = 1;            /* 1 Stop bit */
    spcr->flowctrl = (1 << 1);     /* Bit[1] = RTS/CTS hardware flow control */
    spcr->term_type = 0;           /* Terminal Type: 0 = VT100 */

    spcr->pci_device_id = 0xffff;  /* PCI Device ID: not a PCI device */
    spcr->pci_vendor_id = 0xffff;  /* PCI Vendor ID: not a PCI device */

    build_header(linker, table_data, (void *)spcr, "SPCR", sizeof(*spcr), 2,
                 NULL, NULL);
}

static void
build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorGiccAffinity *core;
    AcpiSratMemoryAffinity *numamem;
    int i, srat_start;
    uint64_t mem_base;
    MachineClass *mc = MACHINE_GET_CLASS(vms);
    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms));

    srat_start = table_data->len;
    srat = acpi_data_push(table_data, sizeof(*srat));
    srat->reserved1 = cpu_to_le32(1);

    for (i = 0; i < cpu_list->len; ++i) {
        core = acpi_data_push(table_data, sizeof(*core));
        core->type = ACPI_SRAT_PROCESSOR_GICC;
        core->length = sizeof(*core);
        core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id);
        core->acpi_processor_uid = cpu_to_le32(i);
        core->flags = cpu_to_le32(1);
    }

    mem_base = vms->memmap[VIRT_MEM].base;
    for (i = 0; i < nb_numa_nodes; ++i) {
        numamem = acpi_data_push(table_data, sizeof(*numamem));
        build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i,
                          MEM_AFFINITY_ENABLED);
        mem_base += numa_info[i].node_mem;
    }

    build_header(linker, table_data, (void *)srat, "SRAT",
                 table_data->len - srat_start, 3, NULL, NULL);
}

static void
build_mcfg(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiTableMcfg *mcfg;
    const MemMapEntry *memmap = vms->memmap;
    int len = sizeof(*mcfg) + sizeof(mcfg->allocation[0]);

    mcfg = acpi_data_push(table_data, len);
    mcfg->allocation[0].address = cpu_to_le64(memmap[VIRT_PCIE_ECAM].base);

    /* Only a single allocation so no need to play with segments */
    mcfg->allocation[0].pci_segment = cpu_to_le16(0);
    mcfg->allocation[0].start_bus_number = 0;
    mcfg->allocation[0].end_bus_number = (memmap[VIRT_PCIE_ECAM].size
                                          / PCIE_MMCFG_SIZE_MIN) - 1;

    build_header(linker, table_data, (void *)mcfg, "MCFG", len, 1, NULL, NULL);
}

static void
build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int gtdt_start = table_data->len;
    AcpiGenericTimerTable *gtdt;
    uint32_t irqflags;

    if (vmc->claim_edge_triggered_timers) {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE;
    } else {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
    }

    gtdt = acpi_data_push(table_data, sizeof *gtdt);
    /* The interrupt values are the same as in the device tree, plus 16 */
    gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16);
    gtdt->secure_el1_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16);
    gtdt->non_secure_el1_flags = cpu_to_le32(irqflags |
                                             ACPI_GTDT_CAP_ALWAYS_ON);

    gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16);
    gtdt->virtual_timer_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16);
    gtdt->non_secure_el2_flags = cpu_to_le32(irqflags);

    build_header(linker, table_data,
                 (void *)(table_data->data + gtdt_start), "GTDT",
                 table_data->len - gtdt_start, 2, NULL, NULL);
}

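/*
 * MADT ("APIC" signature).  One distributor entry plus one GICC entry per
 * CPU; GICv3 machines additionally get a redistributor entry (and an ITS
 * translator entry when the ITS is in use), while GICv2 machines get a
 * v2m MSI frame instead.
 */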
static void
build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int madt_start = table_data->len;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;
    AcpiMultipleApicTable *madt;
    AcpiMadtGenericDistributor *gicd;
    AcpiMadtGenericMsiFrame *gic_msi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);

    gicd = acpi_data_push(table_data, sizeof *gicd);
    gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
    gicd->length = sizeof(*gicd);
    gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
    gicd->version = vms->gic_version;

    for (i = 0; i < vms->smp_cpus; i++) {
        AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                           sizeof(*gicc));
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));

        gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE;
        gicc->length = sizeof(*gicc);
        if (vms->gic_version == 2) {
            gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
        }
        gicc->cpu_interface_number = cpu_to_le32(i);
        gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
        gicc->uid = cpu_to_le32(i);
        gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED);

        if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
            gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
        }
        if (vms->virt && vms->gic_version == 3) {
            gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GICV3_MAINT_IRQ));
        }
    }

    if (vms->gic_version == 3) {
        AcpiMadtGenericTranslator *gic_its;
        AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
                                                            sizeof *gicr);

        gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
        gicr->length = sizeof(*gicr);
        gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
        gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);

        if (its_class_name() && !vmc->no_its) {
            gic_its = acpi_data_push(table_data, sizeof *gic_its);
            gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
            gic_its->length = sizeof(*gic_its);
            gic_its->translation_id = 0;
            gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base);
        }
    } else {
        gic_msi = acpi_data_push(table_data, sizeof *gic_msi);
        gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME;
        gic_msi->length = sizeof(*gic_msi);
        gic_msi->gic_msi_frame_id = 0;
        gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base);
        gic_msi->flags = cpu_to_le32(1);
        gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS);
        gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 3, NULL, NULL);
}

static void build_fadt(GArray *table_data, BIOSLinker *linker,
                       VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
    AcpiFadtDescriptorRev5_1 *fadt = acpi_data_push(table_data, sizeof(*fadt));
    unsigned xdsdt_entry_offset = (char *)&fadt->x_dsdt - table_data->data;
    uint16_t bootflags;

    switch (vms->psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        bootflags = 0;
        break;
    case QEMU_PSCI_CONDUIT_HVC:
        bootflags = ACPI_FADT_ARM_PSCI_COMPLIANT | ACPI_FADT_ARM_PSCI_USE_HVC;
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        bootflags = ACPI_FADT_ARM_PSCI_COMPLIANT;
        break;
    default:
        g_assert_not_reached();
    }

    /* Hardware Reduced = 1 and use PSCI 0.2+ */
    fadt->flags = cpu_to_le32(1 << ACPI_FADT_F_HW_REDUCED_ACPI);
    fadt->arm_boot_flags = cpu_to_le16(bootflags);

    /* ACPI v5.1 (fadt->revision, fadt->minor_revision) */
    fadt->minor_revision = 0x1;

    /* DSDT address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker,
        ACPI_BUILD_TABLE_FILE, xdsdt_entry_offset, sizeof(fadt->x_dsdt),
        ACPI_BUILD_TABLE_FILE, dsdt_tbl_offset);

    build_header(linker, table_data,
                 (void *)fadt, "FACP", sizeof(*fadt), 5, NULL, NULL);
}

static void
build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    Aml *scope, *dsdt;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;

    dsdt = init_aml_allocator();
    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware.
     * While UEFI can use libfdt to disable the RTC device node in the DTB that
     * it passes to the OS, it cannot modify AML. Therefore, we won't generate
     * the RTC ACPI device at all when using UEFI.
     */
    scope = aml_scope("\\_SB");
    acpi_dsdt_add_cpus(scope, vms->smp_cpus);
    acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                       (irqmap[VIRT_UART] + ARM_SPI_BASE));
    acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
    acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
    acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
                    (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
    acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
                      vms->highmem);
    acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
                       (irqmap[VIRT_GPIO] + ARM_SPI_BASE));
    acpi_dsdt_add_power_button(scope);

    aml_append(dsdt, scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - dsdt->buf->len),
        "DSDT", dsdt->buf->len, 2, NULL, NULL);
    free_aml_allocator();
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    MemoryRegion *rsdp_mr;
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
} AcpiBuildState;

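/*
 * Build the complete table set into tables->table_data: the DSDT first
 * (it is referenced from the FADT), then FADT, MADT, GTDT, MCFG and SPCR,
 * plus SRAT/SLIT and IORT when NUMA nodes or an ITS are configured.  All of
 * them are listed in the XSDT, which the separately allocated RSDP points at.
 */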
static
void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    GArray *table_offsets;
    unsigned dsdt, xsdt;
    GArray *tables_blob = tables->table_data;

    table_offsets = g_array_new(false, true /* clear */,
                                sizeof(uint32_t));

    bios_linker_loader_alloc(tables->linker,
                             ACPI_BUILD_TABLE_FILE, tables_blob,
                             64, false /* high memory */);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, vms);

    /* FADT MADT GTDT MCFG SPCR pointed to by RSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt(tables_blob, tables->linker, vms, dsdt);

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_gtdt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_mcfg(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_spcr(tables_blob, tables->linker, vms);

    if (nb_numa_nodes > 0) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, vms);
        if (have_numa_distance) {
            acpi_add_table(table_offsets, tables_blob);
            build_slit(tables_blob, tables->linker);
        }
    }

    if (its_class_name() && !vmc->no_its) {
        acpi_add_table(table_offsets, tables_blob);
        build_iort(tables_blob, tables->linker);
    }

    /* XSDT is pointed to by RSDP */
    xsdt = tables_blob->len;
    build_xsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);

    /* RSDP is in FSEG memory, so allocate it separately */
    build_rsdp(tables->rsdp, tables->linker, xsdt);

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}

static void acpi_ram_update(MemoryRegion *mr, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed
     * e.g. by migration */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
    memory_region_set_dirty(mr, 0, size);
}

static void virt_acpi_build_update(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = true;

    acpi_build_tables_init(&tables);

    virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables);

    acpi_ram_update(build_state->table_mr, tables.table_data);
    acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
    acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);

    acpi_build_tables_cleanup(&tables, true);
}

static void virt_acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = false;
}

static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state,
                                       GArray *blob, const char *name,
                                       uint64_t max_size)
{
    return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
                        name, virt_acpi_build_update, build_state, NULL, true);
}

static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

void virt_acpi_setup(VirtMachineState *vms)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!vms->fw_cfg) {
        trace_virt_acpi_setup();
        return;
    }

    if (!acpi_enabled) {
        trace_virt_acpi_setup();
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    acpi_build_tables_init(&tables);
    virt_acpi_build(vms, &tables);

    /* Now expose it all to Guest */
    build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data,
                                              ACPI_BUILD_TABLE_FILE,
                                              ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr =
        acpi_add_rom_blob(build_state, tables.linker->cmd_blob,
                          "etc/table-loader", 0);

    fw_cfg_add_file(vms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
                    acpi_data_len(tables.tcpalog));

    build_state->rsdp_mr = acpi_add_rom_blob(build_state, tables.rsdp,
                                             ACPI_BUILD_RSDP_FILE, 0);

    qemu_register_reset(virt_acpi_build_reset, build_state);
    virt_acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}