/* hw/i386/acpi-build.c -- as of commit "pc: acpi-build: generate _S[345] packages dynamically" */
/* Support for generating ACPI tables and passing them to Guests
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "acpi-build.h"
#include <stddef.h>
#include <glib.h>
#include "qemu-common.h"
#include "qemu/bitmap.h"
#include "qemu/osdep.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "hw/pci/pci.h"
#include "qom/cpu.h"
#include "hw/i386/pc.h"
#include "target-i386/cpu.h"
#include "hw/timer/hpet.h"
#include "hw/i386/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/isa/isa.h"
#include "hw/acpi/memory_hotplug.h"
#include "sysemu/tpm.h"
#include "hw/acpi/tpm.h"

/* Supported chipsets: */
#include "hw/acpi/piix4.h"
#include "hw/acpi/pcihp.h"
#include "hw/i386/ich9.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci-host/q35.h"
#include "hw/i386/intel_iommu.h"

#include "hw/i386/q35-acpi-dsdt.hex"
#include "hw/i386/acpi-dsdt.hex"

#include "hw/acpi/aml-build.h"

#include "qapi/qmp/qint.h"
#include "qom/qom-qobject.h"
#include "exec/ram_addr.h"
/* These are used to size the ACPI tables for -M pc-i440fx-1.7 and
 * -M pc-i440fx-2.0.  Even if the actual amount of AML generated grows
 * a little bit, there should be plenty of free space since the DSDT
 * shrunk by ~1.5k between QEMU 2.0 and QEMU 2.1.
 */
#define ACPI_BUILD_LEGACY_CPU_AML_SIZE    97
#define ACPI_BUILD_ALIGN_SIZE             0x1000

#define ACPI_BUILD_TABLE_SIZE             0x20000

/* Reserve RAM space for tables: add another order of magnitude. */
#define ACPI_BUILD_TABLE_MAX_SIZE         0x200000

/* #define DEBUG_ACPI_BUILD */
#ifdef DEBUG_ACPI_BUILD
#define ACPI_BUILD_DPRINTF(fmt, ...)        \
    do { printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0)
#else
#define ACPI_BUILD_DPRINTF(fmt, ...)
#endif
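/* Example use (only compiled in when DEBUG_ACPI_BUILD is defined):
 *   ACPI_BUILD_DPRINTF("init ACPI tables\n");   -- see acpi_build() below
 */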
typedef struct AcpiCpuInfo {
    DECLARE_BITMAP(found_cpus, ACPI_CPU_HOTPLUG_ID_LIMIT);
} AcpiCpuInfo;

typedef struct AcpiMcfgInfo {
    uint64_t mcfg_base;
    uint32_t mcfg_size;
} AcpiMcfgInfo;

typedef struct AcpiPmInfo {
    bool s3_disabled;
    bool s4_disabled;
    bool pcihp_bridge_en;
    uint8_t s4_val;
    uint16_t sci_int;
    uint8_t acpi_enable_cmd;
    uint8_t acpi_disable_cmd;
    uint32_t gpe0_blk;
    uint32_t gpe0_blk_len;
    uint32_t io_base;
} AcpiPmInfo;

typedef struct AcpiMiscInfo {
    bool has_hpet;
    bool has_tpm;
    DECLARE_BITMAP(slot_hotplug_enable, PCI_SLOT_MAX);
    const unsigned char *dsdt_code;
    unsigned dsdt_size;
    uint16_t pvpanic_port;
} AcpiMiscInfo;

typedef struct AcpiBuildPciBusHotplugState {
    GArray *device_table;
    GArray *notify_table;
    struct AcpiBuildPciBusHotplugState *parent;
    bool pcihp_bridge_en;
} AcpiBuildPciBusHotplugState;
static void acpi_get_dsdt(AcpiMiscInfo *info)
{
    uint16_t *applesmc_sta;
    Object *piix = piix4_pm_find();
    Object *lpc = ich9_lpc_find();
    assert(!!piix != !!lpc);

    if (piix) {
        info->dsdt_code = AcpiDsdtAmlCode;
        info->dsdt_size = sizeof AcpiDsdtAmlCode;
        applesmc_sta = piix_dsdt_applesmc_sta;
    }
    if (lpc) {
        info->dsdt_code = Q35AcpiDsdtAmlCode;
        info->dsdt_size = sizeof Q35AcpiDsdtAmlCode;
        applesmc_sta = q35_dsdt_applesmc_sta;
    }

    /* Patch in appropriate value for AppleSMC _STA */
    *(uint8_t *)(info->dsdt_code + *applesmc_sta) =
        applesmc_find() ? 0x0b : 0x00;
}

static
int acpi_add_cpu_info(Object *o, void *opaque)
{
    AcpiCpuInfo *cpu = opaque;
    uint64_t apic_id;

    if (object_dynamic_cast(o, TYPE_CPU)) {
        apic_id = object_property_get_int(o, "apic-id", NULL);
        assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);

        set_bit(apic_id, cpu->found_cpus);
    }

    object_child_foreach(o, acpi_add_cpu_info, opaque);
    return 0;
}

static void acpi_get_cpu_info(AcpiCpuInfo *cpu)
{
    Object *root = object_get_root();

    memset(cpu->found_cpus, 0, sizeof cpu->found_cpus);
    object_child_foreach(root, acpi_add_cpu_info, cpu);
}
static void acpi_get_pm_info(AcpiPmInfo *pm)
{
    Object *piix = piix4_pm_find();
    Object *lpc = ich9_lpc_find();
    Object *obj = NULL;
    QObject *o;

    if (piix) {
        obj = piix;
    }
    if (lpc) {
        obj = lpc;
    }
    assert(obj);

    /* Fill in optional s3/s4 related properties */
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
    if (o) {
        pm->s3_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s3_disabled = false;
    }
    qobject_decref(o);
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_DISABLED, NULL);
    if (o) {
        pm->s4_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_disabled = false;
    }
    qobject_decref(o);
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_VAL, NULL);
    if (o) {
        pm->s4_val = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_val = 0;
    }
    qobject_decref(o);

    /* Fill in mandatory properties */
    pm->sci_int = object_property_get_int(obj, ACPI_PM_PROP_SCI_INT, NULL);

    pm->acpi_enable_cmd = object_property_get_int(obj,
                                                  ACPI_PM_PROP_ACPI_ENABLE_CMD,
                                                  NULL);
    pm->acpi_disable_cmd = object_property_get_int(obj,
                                                  ACPI_PM_PROP_ACPI_DISABLE_CMD,
                                                  NULL);
    pm->io_base = object_property_get_int(obj, ACPI_PM_PROP_PM_IO_BASE,
                                          NULL);
    pm->gpe0_blk = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK,
                                           NULL);
    pm->gpe0_blk_len = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK_LEN,
                                               NULL);
    pm->pcihp_bridge_en =
        object_property_get_bool(obj, "acpi-pci-hotplug-with-bridge-support",
                                 NULL);
}
static void acpi_get_misc_info(AcpiMiscInfo *info)
{
    info->has_hpet = hpet_find();
    info->has_tpm = tpm_find();
    info->pvpanic_port = pvpanic_port();
}

static void acpi_get_pci_info(PcPciInfo *info)
{
    Object *pci_host;
    bool ambiguous;

    pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
    g_assert(!ambiguous);
    g_assert(pci_host);

    info->w32.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE_START,
                                              NULL);
    info->w32.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE_END,
                                            NULL);
    info->w64.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE64_START,
                                              NULL);
    info->w64.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE64_END,
                                            NULL);
}
#define ACPI_BUILD_APPNAME  "Bochs"
#define ACPI_BUILD_APPNAME6 "BOCHS "
#define ACPI_BUILD_APPNAME4 "BXPC"

#define ACPI_BUILD_TABLE_FILE "etc/acpi/tables"
#define ACPI_BUILD_RSDP_FILE "etc/acpi/rsdp"
#define ACPI_BUILD_TPMLOG_FILE "etc/tpm/log"

static void
build_header(GArray *linker, GArray *table_data,
             AcpiTableHeader *h, const char *sig, int len, uint8_t rev)
{
    memcpy(&h->signature, sig, 4);
    h->length = cpu_to_le32(len);
    h->revision = rev;
    memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6);
    memcpy(h->oem_table_id, ACPI_BUILD_APPNAME4, 4);
    memcpy(h->oem_table_id + 4, sig, 4);
    h->oem_revision = cpu_to_le32(1);
    memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME4, 4);
    h->asl_compiler_revision = cpu_to_le32(1);
    h->checksum = 0;
    /* Checksum to be filled in by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE,
                                    table_data->data, h, len, &h->checksum);
}
static GArray *build_alloc_method(const char *name, uint8_t arg_count)
{
    GArray *method = build_alloc_array();

    build_append_namestring(method, "%s", name);
    build_append_byte(method, arg_count); /* MethodFlags: ArgCount */

    return method;
}

static void build_append_and_cleanup_method(GArray *device, GArray *method)
{
    uint8_t op = 0x14; /* MethodOp */

    build_package(method, op);

    build_append_array(device, method);
    build_free_array(method);
}

static void build_append_notify_target_ifequal(GArray *method,
                                               GArray *target_name,
                                               uint32_t value)
{
    GArray *notify = build_alloc_array();
    uint8_t op = 0xA0; /* IfOp */

    build_append_byte(notify, 0x93); /* LEqualOp */
    build_append_byte(notify, 0x68); /* Arg0Op */
    build_append_int(notify, value);
    build_append_byte(notify, 0x86); /* NotifyOp */
    build_append_array(notify, target_name);
    build_append_byte(notify, 0x69); /* Arg1Op */

    /* Pack it up */
    build_package(notify, op);

    build_append_array(method, notify);

    build_free_array(notify);
}

/* End here */
#define ACPI_PORT_SMI_CMD           0x00b2 /* TODO: this is APM_CNT_IOPORT */

static inline void *acpi_data_push(GArray *table_data, unsigned size)
{
    unsigned off = table_data->len;
    g_array_set_size(table_data, off + size);
    return table_data->data + off;
}
static unsigned acpi_data_len(GArray *table)
{
#if GLIB_CHECK_VERSION(2, 22, 0)
    assert(g_array_get_element_size(table) == 1);
#endif
    return table->len;
}

static void acpi_align_size(GArray *blob, unsigned align)
{
    /* Align size to multiple of given size. This reduces the chance
     * we need to change size in the future (breaking cross version migration).
     */
    g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
}

/* Set a value within table in a safe manner */
#define ACPI_BUILD_SET_LE(table, size, off, bits, val) \
    do { \
        uint64_t ACPI_BUILD_SET_LE_val = cpu_to_le64(val); \
        memcpy(acpi_data_get_ptr(table, size, off, \
                                 (bits) / BITS_PER_BYTE), \
               &ACPI_BUILD_SET_LE_val, \
               (bits) / BITS_PER_BYTE); \
    } while (0)
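/* Example (taken from patch_pci_windows() below): patch the 32-bit PCI window
 * start into the misc SSDT template:
 *   ACPI_BUILD_SET_LE(start, size, acpi_pci32_start[0], 32, pci->w32.begin);
 */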
static inline void *acpi_data_get_ptr(uint8_t *table_data, unsigned table_size,
                                      unsigned off, unsigned size)
{
    assert(off + size > off);
    assert(off + size <= table_size);
    return table_data + off;
}

static inline void acpi_add_table(GArray *table_offsets, GArray *table_data)
{
    uint32_t offset = cpu_to_le32(table_data->len);
    g_array_append_val(table_offsets, offset);
}
/* FACS */
static void
build_facs(GArray *table_data, GArray *linker, PcGuestInfo *guest_info)
{
    AcpiFacsDescriptorRev1 *facs = acpi_data_push(table_data, sizeof *facs);
    memcpy(&facs->signature, "FACS", 4);
    facs->length = cpu_to_le32(sizeof(*facs));
}

/* Load chipset information in FADT */
static void fadt_setup(AcpiFadtDescriptorRev1 *fadt, AcpiPmInfo *pm)
{
    fadt->model = 1;
    fadt->reserved1 = 0;
    fadt->sci_int = cpu_to_le16(pm->sci_int);
    fadt->smi_cmd = cpu_to_le32(ACPI_PORT_SMI_CMD);
    fadt->acpi_enable = pm->acpi_enable_cmd;
    fadt->acpi_disable = pm->acpi_disable_cmd;
    /* EVT, CNT, TMR offset matches hw/acpi/core.c */
    fadt->pm1a_evt_blk = cpu_to_le32(pm->io_base);
    fadt->pm1a_cnt_blk = cpu_to_le32(pm->io_base + 0x04);
    fadt->pm_tmr_blk = cpu_to_le32(pm->io_base + 0x08);
    fadt->gpe0_blk = cpu_to_le32(pm->gpe0_blk);
    /* EVT, CNT, TMR length matches hw/acpi/core.c */
    fadt->pm1_evt_len = 4;
    fadt->pm1_cnt_len = 2;
    fadt->pm_tmr_len = 4;
    fadt->gpe0_blk_len = pm->gpe0_blk_len;
    fadt->plvl2_lat = cpu_to_le16(0xfff); /* C2 state not supported */
    fadt->plvl3_lat = cpu_to_le16(0xfff); /* C3 state not supported */
    fadt->flags = cpu_to_le32((1 << ACPI_FADT_F_WBINVD) |
                              (1 << ACPI_FADT_F_PROC_C1) |
                              (1 << ACPI_FADT_F_SLP_BUTTON) |
                              (1 << ACPI_FADT_F_RTC_S4));
    fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_USE_PLATFORM_CLOCK);
    /* APIC destination mode ("Flat Logical") has an upper limit of 8 CPUs.
     * For more than 8 CPUs, "Clustered Logical" mode has to be used.
     */
    if (max_cpus > 8) {
        fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_FORCE_APIC_CLUSTER_MODEL);
    }
}
/* FADT */
static void
build_fadt(GArray *table_data, GArray *linker, AcpiPmInfo *pm,
           unsigned facs, unsigned dsdt)
{
    AcpiFadtDescriptorRev1 *fadt = acpi_data_push(table_data, sizeof(*fadt));

    fadt->firmware_ctrl = cpu_to_le32(facs);
    /* FACS address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   table_data, &fadt->firmware_ctrl,
                                   sizeof fadt->firmware_ctrl);

    fadt->dsdt = cpu_to_le32(dsdt);
    /* DSDT address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   table_data, &fadt->dsdt,
                                   sizeof fadt->dsdt);

    fadt_setup(fadt, pm);

    build_header(linker, table_data,
                 (void *)fadt, "FACP", sizeof(*fadt), 1);
}
static void
build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu,
           PcGuestInfo *guest_info)
{
    int madt_start = table_data->len;

    AcpiMultipleApicTable *madt;
    AcpiMadtIoApic *io_apic;
    AcpiMadtIntsrcovr *intsrcovr;
    AcpiMadtLocalNmi *local_nmi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);
    madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS);
    madt->flags = cpu_to_le32(1);

    for (i = 0; i < guest_info->apic_id_limit; i++) {
        AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic);
        apic->type = ACPI_APIC_PROCESSOR;
        apic->length = sizeof(*apic);
        apic->processor_id = i;
        apic->local_apic_id = i;
        if (test_bit(i, cpu->found_cpus)) {
            apic->flags = cpu_to_le32(1);
        } else {
            apic->flags = cpu_to_le32(0);
        }
    }
    io_apic = acpi_data_push(table_data, sizeof *io_apic);
    io_apic->type = ACPI_APIC_IO;
    io_apic->length = sizeof(*io_apic);
#define ACPI_BUILD_IOAPIC_ID 0x0
    io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID;
    io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS);
    io_apic->interrupt = cpu_to_le32(0);

    if (guest_info->apic_xrupt_override) {
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type   = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = 0;
        intsrcovr->gsi    = cpu_to_le32(2);
        intsrcovr->flags  = cpu_to_le16(0); /* conforms to bus specifications */
    }
    for (i = 1; i < 16; i++) {
#define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11))
        if (!(ACPI_BUILD_PCI_IRQS & (1 << i))) {
            /* No need for an INT source override structure. */
            continue;
        }
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type   = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = i;
        intsrcovr->gsi    = cpu_to_le32(i);
        intsrcovr->flags  = cpu_to_le16(0xd); /* active high, level triggered */
    }

    local_nmi = acpi_data_push(table_data, sizeof *local_nmi);
    local_nmi->type         = ACPI_APIC_LOCAL_NMI;
    local_nmi->length       = sizeof(*local_nmi);
    local_nmi->processor_id = 0xff; /* all processors */
    local_nmi->flags        = cpu_to_le16(0);
    local_nmi->lint         = 1; /* ACPI_LINT1 */

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 1);
}
/* Encode a hex value */
static inline char acpi_get_hex(uint32_t val)
{
    val &= 0x0f;
    return (val <= 9) ? ('0' + val) : ('A' + val - 10);
}
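/* e.g. acpi_get_hex(0x1c) == 'C'; used below to stamp hex digits into
 * template names such as the CPxx processor objects and the per-slot
 * device names patched by the patch_pci* helpers. */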
#include "hw/i386/ssdt-proc.hex"

/* 0x5B 0x83 ProcessorOp PkgLength NameString ProcID */
#define ACPI_PROC_OFFSET_CPUHEX (*ssdt_proc_name - *ssdt_proc_start + 2)
#define ACPI_PROC_OFFSET_CPUID1 (*ssdt_proc_name - *ssdt_proc_start + 4)
#define ACPI_PROC_OFFSET_CPUID2 (*ssdt_proc_id - *ssdt_proc_start)
#define ACPI_PROC_SIZEOF (*ssdt_proc_end - *ssdt_proc_start)
#define ACPI_PROC_AML (ssdp_proc_aml + *ssdt_proc_start)

/* 0x5B 0x82 DeviceOp PkgLength NameString */
#define ACPI_PCIHP_OFFSET_HEX (*ssdt_pcihp_name - *ssdt_pcihp_start + 1)
#define ACPI_PCIHP_OFFSET_ID (*ssdt_pcihp_id - *ssdt_pcihp_start)
#define ACPI_PCIHP_OFFSET_ADR (*ssdt_pcihp_adr - *ssdt_pcihp_start)
#define ACPI_PCIHP_OFFSET_EJ0 (*ssdt_pcihp_ej0 - *ssdt_pcihp_start)
#define ACPI_PCIHP_SIZEOF (*ssdt_pcihp_end - *ssdt_pcihp_start)
#define ACPI_PCIHP_AML (ssdp_pcihp_aml + *ssdt_pcihp_start)

#define ACPI_PCINOHP_OFFSET_HEX (*ssdt_pcinohp_name - *ssdt_pcinohp_start + 1)
#define ACPI_PCINOHP_OFFSET_ADR (*ssdt_pcinohp_adr - *ssdt_pcinohp_start)
#define ACPI_PCINOHP_SIZEOF (*ssdt_pcinohp_end - *ssdt_pcinohp_start)
#define ACPI_PCINOHP_AML (ssdp_pcihp_aml + *ssdt_pcinohp_start)

#define ACPI_PCIVGA_OFFSET_HEX (*ssdt_pcivga_name - *ssdt_pcivga_start + 1)
#define ACPI_PCIVGA_OFFSET_ADR (*ssdt_pcivga_adr - *ssdt_pcivga_start)
#define ACPI_PCIVGA_SIZEOF (*ssdt_pcivga_end - *ssdt_pcivga_start)
#define ACPI_PCIVGA_AML (ssdp_pcihp_aml + *ssdt_pcivga_start)

#define ACPI_PCIQXL_OFFSET_HEX (*ssdt_pciqxl_name - *ssdt_pciqxl_start + 1)
#define ACPI_PCIQXL_OFFSET_ADR (*ssdt_pciqxl_adr - *ssdt_pciqxl_start)
#define ACPI_PCIQXL_SIZEOF (*ssdt_pciqxl_end - *ssdt_pciqxl_start)
#define ACPI_PCIQXL_AML (ssdp_pcihp_aml + *ssdt_pciqxl_start)

#include "hw/i386/ssdt-mem.hex"

/* 0x5B 0x82 DeviceOp PkgLength NameString DimmID */
#define ACPI_MEM_OFFSET_HEX (*ssdt_mem_name - *ssdt_mem_start + 2)
#define ACPI_MEM_OFFSET_ID (*ssdt_mem_id - *ssdt_mem_start + 7)
#define ACPI_MEM_SIZEOF (*ssdt_mem_end - *ssdt_mem_start)
#define ACPI_MEM_AML (ssdm_mem_aml + *ssdt_mem_start)

#define ACPI_SSDT_SIGNATURE 0x54445353 /* SSDT */
#define ACPI_SSDT_HEADER_LENGTH 36

#include "hw/i386/ssdt-misc.hex"
#include "hw/i386/ssdt-pcihp.hex"
#include "hw/i386/ssdt-tpm.hex"
static void
build_append_notify_method(GArray *device, const char *name,
                           const char *format, int count)
{
    int i;
    GArray *method = build_alloc_method(name, 2);

    for (i = 0; i < count; i++) {
        GArray *target = build_alloc_array();
        build_append_namestring(target, format, i);
        assert(i < 256); /* Fits in 1 byte */
        build_append_notify_target_ifequal(method, target, i);
        build_free_array(target);
    }

    build_append_and_cleanup_method(device, method);
}
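/* The method built above has roughly this ASL shape (a sketch; the actual
 * bytes are the raw opcodes appended by the helpers above):
 *   Method (<name>, 2) {
 *       If (LEqual (Arg0, 0x00)) { Notify (<format % 0>, Arg1) }
 *       If (LEqual (Arg0, 0x01)) { Notify (<format % 1>, Arg1) }
 *       ...
 *   }
 */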
static void patch_pcihp(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCIHP_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCIHP_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCIHP_OFFSET_ID] = slot;
    ssdt_ptr[ACPI_PCIHP_OFFSET_ADR + 2] = slot;
}

static void patch_pcinohp(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCINOHP_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCINOHP_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCINOHP_OFFSET_ADR + 2] = slot;
}

static void patch_pcivga(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCIVGA_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCIVGA_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCIVGA_OFFSET_ADR + 2] = slot;
}

static void patch_pciqxl(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCIQXL_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCIQXL_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCIQXL_OFFSET_ADR + 2] = slot;
}
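/* Each patch_* helper stamps a per-slot copy of the corresponding template
 * AML: the two hex digits of the device name, the slot id byte where the
 * template has one, and the slot field of the 32-bit _ADR value (byte 2,
 * i.e. bits 16-23, matching slot << 16). */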
/* Assign BSEL property to all buses.  In the future, this can be changed
 * to only assign to buses that support hotplug.
 */
static void *acpi_set_bsel(PCIBus *bus, void *opaque)
{
    unsigned *bsel_alloc = opaque;
    unsigned *bus_bsel;

    if (qbus_is_hotpluggable(BUS(bus))) {
        bus_bsel = g_malloc(sizeof *bus_bsel);

        *bus_bsel = (*bsel_alloc)++;
        object_property_add_uint32_ptr(OBJECT(bus), ACPI_PCIHP_PROP_BSEL,
                                       bus_bsel, NULL);
    }

    return bsel_alloc;
}

static void acpi_set_pci_info(void)
{
    PCIBus *bus = find_i440fx(); /* TODO: Q35 support */
    unsigned bsel_alloc = 0;

    if (bus) {
        /* Scan all PCI buses. Set property to enable acpi based hotplug. */
        pci_for_each_bus_depth_first(bus, acpi_set_bsel, NULL, &bsel_alloc);
    }
}

static void build_pci_bus_state_init(AcpiBuildPciBusHotplugState *state,
                                     AcpiBuildPciBusHotplugState *parent,
                                     bool pcihp_bridge_en)
{
    state->parent = parent;
    state->device_table = build_alloc_array();
    state->notify_table = build_alloc_array();
    state->pcihp_bridge_en = pcihp_bridge_en;
}

static void build_pci_bus_state_cleanup(AcpiBuildPciBusHotplugState *state)
{
    build_free_array(state->device_table);
    build_free_array(state->notify_table);
}

static void *build_pci_bus_begin(PCIBus *bus, void *parent_state)
{
    AcpiBuildPciBusHotplugState *parent = parent_state;
    AcpiBuildPciBusHotplugState *child = g_malloc(sizeof *child);

    build_pci_bus_state_init(child, parent, parent->pcihp_bridge_en);

    return child;
}
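/* build_pci_bus_begin()/build_pci_bus_end() are the callbacks handed to
 * pci_for_each_bus_depth_first() in build_ssdt(): each bus collects its AML
 * into device_table/notify_table and, in build_pci_bus_end(), appends the
 * result to its parent's tables. */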
683 static void build_pci_bus_end(PCIBus *bus, void *bus_state)
685 AcpiBuildPciBusHotplugState *child = bus_state;
686 AcpiBuildPciBusHotplugState *parent = child->parent;
687 GArray *bus_table = build_alloc_array();
688 DECLARE_BITMAP(slot_hotplug_enable, PCI_SLOT_MAX);
689 DECLARE_BITMAP(slot_device_present, PCI_SLOT_MAX);
690 DECLARE_BITMAP(slot_device_system, PCI_SLOT_MAX);
691 DECLARE_BITMAP(slot_device_vga, PCI_SLOT_MAX);
692 DECLARE_BITMAP(slot_device_qxl, PCI_SLOT_MAX);
693 uint8_t op;
694 int i;
695 QObject *bsel;
696 GArray *method;
697 bool bus_hotplug_support = false;
700 * Skip bridge subtree creation if bridge hotplug is disabled
701 * to make acpi tables compatible with legacy machine types.
702 * Skip creation for hotplugged bridges as well.
704 if (bus->parent_dev && (!child->pcihp_bridge_en ||
705 DEVICE(bus->parent_dev)->hotplugged)) {
706 build_free_array(bus_table);
707 build_pci_bus_state_cleanup(child);
708 g_free(child);
709 return;
712 if (bus->parent_dev) {
713 op = 0x82; /* DeviceOp */
714 build_append_namestring(bus_table, "S%.02X",
715 bus->parent_dev->devfn);
716 build_append_byte(bus_table, 0x08); /* NameOp */
717 build_append_namestring(bus_table, "_SUN");
718 build_append_int(bus_table, PCI_SLOT(bus->parent_dev->devfn));
719 build_append_byte(bus_table, 0x08); /* NameOp */
720 build_append_namestring(bus_table, "_ADR");
721 build_append_int(bus_table, (PCI_SLOT(bus->parent_dev->devfn) << 16) |
722 PCI_FUNC(bus->parent_dev->devfn));
723 } else {
724 op = 0x10; /* ScopeOp */;
725 build_append_namestring(bus_table, "PCI0");
728 bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL);
729 if (bsel) {
730 build_append_byte(bus_table, 0x08); /* NameOp */
731 build_append_namestring(bus_table, "BSEL");
732 build_append_int(bus_table, qint_get_int(qobject_to_qint(bsel)));
733 memset(slot_hotplug_enable, 0xff, sizeof slot_hotplug_enable);
734 } else {
735 /* No bsel - no slots are hot-pluggable */
736 memset(slot_hotplug_enable, 0x00, sizeof slot_hotplug_enable);
739 memset(slot_device_present, 0x00, sizeof slot_device_present);
740 memset(slot_device_system, 0x00, sizeof slot_device_present);
741 memset(slot_device_vga, 0x00, sizeof slot_device_vga);
742 memset(slot_device_qxl, 0x00, sizeof slot_device_qxl);
744 for (i = 0; i < ARRAY_SIZE(bus->devices); i += PCI_FUNC_MAX) {
745 DeviceClass *dc;
746 PCIDeviceClass *pc;
747 PCIDevice *pdev = bus->devices[i];
748 int slot = PCI_SLOT(i);
749 bool bridge_in_acpi;
751 if (!pdev) {
752 continue;
755 set_bit(slot, slot_device_present);
756 pc = PCI_DEVICE_GET_CLASS(pdev);
757 dc = DEVICE_GET_CLASS(pdev);
759 /* When hotplug for bridges is enabled, bridges are
760 * described in ACPI separately (see build_pci_bus_end).
761 * In this case they aren't themselves hot-pluggable.
762 * Hotplugged bridges *are* hot-pluggable.
764 bridge_in_acpi = pc->is_bridge && child->pcihp_bridge_en &&
765 !DEVICE(pdev)->hotplugged;
767 if (pc->class_id == PCI_CLASS_BRIDGE_ISA || bridge_in_acpi) {
768 set_bit(slot, slot_device_system);
771 if (pc->class_id == PCI_CLASS_DISPLAY_VGA) {
772 set_bit(slot, slot_device_vga);
774 if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) {
775 set_bit(slot, slot_device_qxl);
779 if (!dc->hotpluggable || bridge_in_acpi) {
780 clear_bit(slot, slot_hotplug_enable);
784 /* Append Device object for each slot */
785 for (i = 0; i < PCI_SLOT_MAX; i++) {
786 bool can_eject = test_bit(i, slot_hotplug_enable);
787 bool present = test_bit(i, slot_device_present);
788 bool vga = test_bit(i, slot_device_vga);
789 bool qxl = test_bit(i, slot_device_qxl);
790 bool system = test_bit(i, slot_device_system);
791 if (can_eject) {
792 void *pcihp = acpi_data_push(bus_table,
793 ACPI_PCIHP_SIZEOF);
794 memcpy(pcihp, ACPI_PCIHP_AML, ACPI_PCIHP_SIZEOF);
795 patch_pcihp(i, pcihp);
796 bus_hotplug_support = true;
797 } else if (qxl) {
798 void *pcihp = acpi_data_push(bus_table,
799 ACPI_PCIQXL_SIZEOF);
800 memcpy(pcihp, ACPI_PCIQXL_AML, ACPI_PCIQXL_SIZEOF);
801 patch_pciqxl(i, pcihp);
802 } else if (vga) {
803 void *pcihp = acpi_data_push(bus_table,
804 ACPI_PCIVGA_SIZEOF);
805 memcpy(pcihp, ACPI_PCIVGA_AML, ACPI_PCIVGA_SIZEOF);
806 patch_pcivga(i, pcihp);
807 } else if (system) {
808 /* Nothing to do: system devices are in DSDT or in SSDT above. */
809 } else if (present) {
810 void *pcihp = acpi_data_push(bus_table,
811 ACPI_PCINOHP_SIZEOF);
812 memcpy(pcihp, ACPI_PCINOHP_AML, ACPI_PCINOHP_SIZEOF);
813 patch_pcinohp(i, pcihp);
817 if (bsel) {
818 method = build_alloc_method("DVNT", 2);
820 for (i = 0; i < PCI_SLOT_MAX; i++) {
821 GArray *notify;
822 uint8_t op;
824 if (!test_bit(i, slot_hotplug_enable)) {
825 continue;
828 notify = build_alloc_array();
829 op = 0xA0; /* IfOp */
831 build_append_byte(notify, 0x7B); /* AndOp */
832 build_append_byte(notify, 0x68); /* Arg0Op */
833 build_append_int(notify, 0x1U << i);
834 build_append_byte(notify, 0x00); /* NullName */
835 build_append_byte(notify, 0x86); /* NotifyOp */
836 build_append_namestring(notify, "S%.02X", PCI_DEVFN(i, 0));
837 build_append_byte(notify, 0x69); /* Arg1Op */
839 /* Pack it up */
840 build_package(notify, op);
842 build_append_array(method, notify);
844 build_free_array(notify);
847 build_append_and_cleanup_method(bus_table, method);
850 /* Append PCNT method to notify about events on local and child buses.
851 * Add unconditionally for root since DSDT expects it.
853 if (bus_hotplug_support || child->notify_table->len || !bus->parent_dev) {
854 method = build_alloc_method("PCNT", 0);
856 /* If bus supports hotplug select it and notify about local events */
857 if (bsel) {
858 build_append_byte(method, 0x70); /* StoreOp */
859 build_append_int(method, qint_get_int(qobject_to_qint(bsel)));
860 build_append_namestring(method, "BNUM");
861 build_append_namestring(method, "DVNT");
862 build_append_namestring(method, "PCIU");
863 build_append_int(method, 1); /* Device Check */
864 build_append_namestring(method, "DVNT");
865 build_append_namestring(method, "PCID");
866 build_append_int(method, 3); /* Eject Request */
869 /* Notify about child bus events in any case */
870 build_append_array(method, child->notify_table);
872 build_append_and_cleanup_method(bus_table, method);
874 /* Append description of child buses */
875 build_append_array(bus_table, child->device_table);
877 /* Pack it up */
878 if (bus->parent_dev) {
879 build_extop_package(bus_table, op);
880 } else {
881 build_package(bus_table, op);
884 /* Append our bus description to parent table */
885 build_append_array(parent->device_table, bus_table);
887 /* Also tell parent how to notify us, invoking PCNT method.
888 * At the moment this is not needed for root as we have a single root.
890 if (bus->parent_dev) {
891 build_append_namestring(parent->notify_table, "^PCNT.S%.02X",
892 bus->parent_dev->devfn);
896 qobject_decref(bsel);
897 build_free_array(bus_table);
898 build_pci_bus_state_cleanup(child);
899 g_free(child);
902 static void patch_pci_windows(PcPciInfo *pci, uint8_t *start, unsigned size)
904 ACPI_BUILD_SET_LE(start, size, acpi_pci32_start[0], 32, pci->w32.begin);
906 ACPI_BUILD_SET_LE(start, size, acpi_pci32_end[0], 32, pci->w32.end - 1);
908 if (pci->w64.end || pci->w64.begin) {
909 ACPI_BUILD_SET_LE(start, size, acpi_pci64_valid[0], 8, 1);
910 ACPI_BUILD_SET_LE(start, size, acpi_pci64_start[0], 64, pci->w64.begin);
911 ACPI_BUILD_SET_LE(start, size, acpi_pci64_end[0], 64, pci->w64.end - 1);
912 ACPI_BUILD_SET_LE(start, size, acpi_pci64_length[0], 64, pci->w64.end - pci->w64.begin);
913 } else {
914 ACPI_BUILD_SET_LE(start, size, acpi_pci64_valid[0], 8, 0);
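/* The acpi_pci32_* / acpi_pci64_* offset arrays used above live in
 * hw/i386/ssdt-misc.hex (included earlier), presumably emitted by the
 * template's ACPI_EXTRACT annotations; they locate the _CRS window fields
 * inside ssdp_misc_aml that get patched here. */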
918 static void
919 build_ssdt(GArray *table_data, GArray *linker,
920 AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc,
921 PcPciInfo *pci, PcGuestInfo *guest_info)
923 MachineState *machine = MACHINE(qdev_get_machine());
924 uint32_t nr_mem = machine->ram_slots;
925 unsigned acpi_cpus = guest_info->apic_id_limit;
926 uint8_t *ssdt_ptr;
927 Aml *ssdt, *sb_scope, *scope, *pkg;
928 int i;
930 ssdt = init_aml_allocator();
931 /* The current AML generator can cover the APIC ID range [0..255],
932 * inclusive, for VCPU hotplug. */
933 QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
934 g_assert(acpi_cpus <= ACPI_CPU_HOTPLUG_ID_LIMIT);
936 /* Copy header and patch values in the S3_ / S4_ / S5_ packages */
937 ssdt_ptr = acpi_data_push(ssdt->buf, sizeof(ssdp_misc_aml));
938 memcpy(ssdt_ptr, ssdp_misc_aml, sizeof(ssdp_misc_aml));
940 patch_pci_windows(pci, ssdt_ptr, sizeof(ssdp_misc_aml));
942 ACPI_BUILD_SET_LE(ssdt_ptr, sizeof(ssdp_misc_aml),
943 ssdt_isa_pest[0], 16, misc->pvpanic_port);
945 ACPI_BUILD_SET_LE(ssdt_ptr, sizeof(ssdp_misc_aml),
946 ssdt_mctrl_nr_slots[0], 32, nr_mem);
948 /* create S3_ / S4_ / S5_ packages if necessary */
949 scope = aml_scope("\\");
950 if (!pm->s3_disabled) {
951 pkg = aml_package(4);
952 aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */
953 aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */
954 aml_append(pkg, aml_int(0)); /* reserved */
955 aml_append(pkg, aml_int(0)); /* reserved */
956 aml_append(scope, aml_name_decl("_S3", pkg));
959 if (!pm->s4_disabled) {
960 pkg = aml_package(4);
961 aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */
962 /* PM1b_CNT.SLP_TYP, FIXME: not impl. */
963 aml_append(pkg, aml_int(pm->s4_val));
964 aml_append(pkg, aml_int(0)); /* reserved */
965 aml_append(pkg, aml_int(0)); /* reserved */
966 aml_append(scope, aml_name_decl("_S4", pkg));
969 pkg = aml_package(4);
970 aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */
971 aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */
972 aml_append(pkg, aml_int(0)); /* reserved */
973 aml_append(pkg, aml_int(0)); /* reserved */
974 aml_append(scope, aml_name_decl("_S5", pkg));
975 aml_append(ssdt, scope);
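/* Rough ASL equivalent of the packages generated above (a sketch):
 *   Name (\_S3, Package () { One, One, Zero, Zero })        -- only if S3 is enabled
 *   Name (\_S4, Package () { s4_val, s4_val, Zero, Zero })  -- only if S4 is enabled
 *   Name (\_S5, Package () { Zero, Zero, Zero, Zero })
 */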
977 sb_scope = aml_scope("_SB");
979 /* build Processor object for each processor */
980 for (i = 0; i < acpi_cpus; i++) {
981 uint8_t *proc = acpi_data_push(sb_scope->buf, ACPI_PROC_SIZEOF);
982 memcpy(proc, ACPI_PROC_AML, ACPI_PROC_SIZEOF);
983 proc[ACPI_PROC_OFFSET_CPUHEX] = acpi_get_hex(i >> 4);
984 proc[ACPI_PROC_OFFSET_CPUHEX+1] = acpi_get_hex(i);
985 proc[ACPI_PROC_OFFSET_CPUID1] = i;
986 proc[ACPI_PROC_OFFSET_CPUID2] = i;
989 /* build this code:
990 * Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...}
992 /* Arg0 = Processor ID = APIC ID */
993 build_append_notify_method(sb_scope->buf, "NTFY",
994 "CP%0.02X", acpi_cpus);
996 /* build "Name(CPON, Package() { One, One, ..., Zero, Zero, ... })" */
997 build_append_byte(sb_scope->buf, 0x08); /* NameOp */
998 build_append_namestring(sb_scope->buf, "CPON");
1001 GArray *package = build_alloc_array();
1002 uint8_t op;
1005 * Note: The ability to create variable-sized packages was first introduced in ACPI 2.0. ACPI 1.0 only
1006 * allowed fixed-size packages with up to 255 elements.
1007 * Windows guests up to win2k8 fail when VarPackageOp is used.
1009 if (acpi_cpus <= 255) {
1010 op = 0x12; /* PackageOp */
1011 build_append_byte(package, acpi_cpus); /* NumElements */
1012 } else {
1013 op = 0x13; /* VarPackageOp */
1014 build_append_int(package, acpi_cpus); /* VarNumElements */
1017 for (i = 0; i < acpi_cpus; i++) {
1018 uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00;
1019 build_append_byte(package, b);
1022 build_package(package, op);
1023 build_append_array(sb_scope->buf, package);
1024 build_free_array(package);
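/* The package built above encodes the initial CPU presence state: roughly
 * Name (CPON, Package (acpi_cpus) { One, One, ..., Zero }), one One/Zero
 * element per possible APIC ID, mirroring cpu->found_cpus. */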
1027 if (nr_mem) {
1028 assert(nr_mem <= ACPI_MAX_RAM_SLOTS);
1029 /* build memory devices */
1030 for (i = 0; i < nr_mem; i++) {
1031 char id[3];
1032 uint8_t *mem = acpi_data_push(sb_scope->buf, ACPI_MEM_SIZEOF);
1034 snprintf(id, sizeof(id), "%02X", i);
1035 memcpy(mem, ACPI_MEM_AML, ACPI_MEM_SIZEOF);
1036 memcpy(mem + ACPI_MEM_OFFSET_HEX, id, 2);
1037 memcpy(mem + ACPI_MEM_OFFSET_ID, id, 2);
1040 /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) {
1041 * If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ...
1043 build_append_notify_method(sb_scope->buf,
1044 stringify(MEMORY_SLOT_NOTIFY_METHOD),
1045 "MP%0.02X", nr_mem);
1049 AcpiBuildPciBusHotplugState hotplug_state;
1050 Object *pci_host;
1051 PCIBus *bus = NULL;
1052 bool ambiguous;
1054 pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
1055 if (!ambiguous && pci_host) {
1056 bus = PCI_HOST_BRIDGE(pci_host)->bus;
1059 build_pci_bus_state_init(&hotplug_state, NULL, pm->pcihp_bridge_en);
1061 if (bus) {
1062 /* Scan all PCI buses. Generate tables to support hotplug. */
1063 pci_for_each_bus_depth_first(bus, build_pci_bus_begin,
1064 build_pci_bus_end, &hotplug_state);
1067 build_append_array(sb_scope->buf, hotplug_state.device_table);
1068 build_pci_bus_state_cleanup(&hotplug_state);
1070 aml_append(ssdt, sb_scope);
1073 /* copy AML table into ACPI tables blob and patch header there */
1074 g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
1075 build_header(linker, table_data,
1076 (void *)(table_data->data + table_data->len - ssdt->buf->len),
1077 "SSDT", ssdt->buf->len, 1);
1078 free_aml_allocator();
1081 static void
1082 build_hpet(GArray *table_data, GArray *linker)
1084 Acpi20Hpet *hpet;
1086 hpet = acpi_data_push(table_data, sizeof(*hpet));
1087 /* Note timer_block_id value must be kept in sync with value advertised by
1088 * emulated hpet
1090 hpet->timer_block_id = cpu_to_le32(0x8086a201);
1091 hpet->addr.address = cpu_to_le64(HPET_BASE);
1092 build_header(linker, table_data,
1093 (void *)hpet, "HPET", sizeof(*hpet), 1);
1096 static void
1097 build_tpm_tcpa(GArray *table_data, GArray *linker, GArray *tcpalog)
1099 Acpi20Tcpa *tcpa = acpi_data_push(table_data, sizeof *tcpa);
1100 uint64_t log_area_start_address = acpi_data_len(tcpalog);
1102 tcpa->platform_class = cpu_to_le16(TPM_TCPA_ACPI_CLASS_CLIENT);
1103 tcpa->log_area_minimum_length = cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE);
1104 tcpa->log_area_start_address = cpu_to_le64(log_area_start_address);
1106 bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, 1,
1107 false /* high memory */);
1109 /* log area start address to be filled by Guest linker */
1110 bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
1111 ACPI_BUILD_TPMLOG_FILE,
1112 table_data, &tcpa->log_area_start_address,
1113 sizeof(tcpa->log_area_start_address));
1115 build_header(linker, table_data,
1116 (void *)tcpa, "TCPA", sizeof(*tcpa), 2);
1118 acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE);
1121 static void
1122 build_tpm_ssdt(GArray *table_data, GArray *linker)
1124 void *tpm_ptr;
1126 tpm_ptr = acpi_data_push(table_data, sizeof(ssdt_tpm_aml));
1127 memcpy(tpm_ptr, ssdt_tpm_aml, sizeof(ssdt_tpm_aml));
1130 typedef enum {
1131 MEM_AFFINITY_NOFLAGS = 0,
1132 MEM_AFFINITY_ENABLED = (1 << 0),
1133 MEM_AFFINITY_HOTPLUGGABLE = (1 << 1),
1134 MEM_AFFINITY_NON_VOLATILE = (1 << 2),
1135 } MemoryAffinityFlags;
1137 static void
1138 acpi_build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
1139 uint64_t len, int node, MemoryAffinityFlags flags)
1141 numamem->type = ACPI_SRAT_MEMORY;
1142 numamem->length = sizeof(*numamem);
1143 memset(numamem->proximity, 0, 4);
1144 numamem->proximity[0] = node;
1145 numamem->flags = cpu_to_le32(flags);
1146 numamem->base_addr = cpu_to_le64(base);
1147 numamem->range_length = cpu_to_le64(len);
1150 static void
1151 build_srat(GArray *table_data, GArray *linker, PcGuestInfo *guest_info)
1153 AcpiSystemResourceAffinityTable *srat;
1154 AcpiSratProcessorAffinity *core;
1155 AcpiSratMemoryAffinity *numamem;
1157 int i;
1158 uint64_t curnode;
1159 int srat_start, numa_start, slots;
1160 uint64_t mem_len, mem_base, next_base;
1161 PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
1162 ram_addr_t hotplugabble_address_space_size =
1163 object_property_get_int(OBJECT(pcms), PC_MACHINE_MEMHP_REGION_SIZE,
1164 NULL);
1166 srat_start = table_data->len;
1168 srat = acpi_data_push(table_data, sizeof *srat);
1169 srat->reserved1 = cpu_to_le32(1);
1170 core = (void *)(srat + 1);
1172 for (i = 0; i < guest_info->apic_id_limit; ++i) {
1173 core = acpi_data_push(table_data, sizeof *core);
1174 core->type = ACPI_SRAT_PROCESSOR;
1175 core->length = sizeof(*core);
1176 core->local_apic_id = i;
1177 curnode = guest_info->node_cpu[i];
1178 core->proximity_lo = curnode;
1179 memset(core->proximity_hi, 0, 3);
1180 core->local_sapic_eid = 0;
1181 core->flags = cpu_to_le32(1);
1185 /* the memory map is a bit tricky, it contains at least one hole
1186 * from 640k-1M and possibly another one from 3.5G-4G.
1188 next_base = 0;
1189 numa_start = table_data->len;
1191 numamem = acpi_data_push(table_data, sizeof *numamem);
1192 acpi_build_srat_memory(numamem, 0, 640*1024, 0, MEM_AFFINITY_ENABLED);
1193 next_base = 1024 * 1024;
1194 for (i = 1; i < guest_info->numa_nodes + 1; ++i) {
1195 mem_base = next_base;
1196 mem_len = guest_info->node_mem[i - 1];
1197 if (i == 1) {
1198 mem_len -= 1024 * 1024;
1200 next_base = mem_base + mem_len;
1202 /* Cut out the ACPI_PCI hole */
1203 if (mem_base <= guest_info->ram_size_below_4g &&
1204 next_base > guest_info->ram_size_below_4g) {
1205 mem_len -= next_base - guest_info->ram_size_below_4g;
1206 if (mem_len > 0) {
1207 numamem = acpi_data_push(table_data, sizeof *numamem);
1208 acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
1209 MEM_AFFINITY_ENABLED);
1211 mem_base = 1ULL << 32;
1212 mem_len = next_base - guest_info->ram_size_below_4g;
1213 next_base += (1ULL << 32) - guest_info->ram_size_below_4g;
1215 numamem = acpi_data_push(table_data, sizeof *numamem);
1216 acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
1217 MEM_AFFINITY_ENABLED);
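/* The loop below pads the table with empty (NOFLAGS) entries up to
 * numa_nodes + 2 memory affinity structures, so the SRAT size does not
 * depend on how the PCI hole splits the RAM ranges above. */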
1219 slots = (table_data->len - numa_start) / sizeof *numamem;
1220 for (; slots < guest_info->numa_nodes + 2; slots++) {
1221 numamem = acpi_data_push(table_data, sizeof *numamem);
1222 acpi_build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
1226 * Entry is required for Windows to enable memory hotplug in OS.
1227 * Memory devices may override proximity set by this entry,
1228 * providing _PXM method if necessary.
1230 if (hotplugabble_address_space_size) {
1231 numamem = acpi_data_push(table_data, sizeof *numamem);
1232 acpi_build_srat_memory(numamem, pcms->hotplug_memory_base,
1233 hotplugabble_address_space_size, 0,
1234 MEM_AFFINITY_HOTPLUGGABLE |
1235 MEM_AFFINITY_ENABLED);
1238 build_header(linker, table_data,
1239 (void *)(table_data->data + srat_start),
1240 "SRAT",
1241 table_data->len - srat_start, 1);
1244 static void
1245 build_mcfg_q35(GArray *table_data, GArray *linker, AcpiMcfgInfo *info)
1247 AcpiTableMcfg *mcfg;
1248 const char *sig;
1249 int len = sizeof(*mcfg) + 1 * sizeof(mcfg->allocation[0]);
1251 mcfg = acpi_data_push(table_data, len);
1252 mcfg->allocation[0].address = cpu_to_le64(info->mcfg_base);
1253 /* Only a single allocation so no need to play with segments */
1254 mcfg->allocation[0].pci_segment = cpu_to_le16(0);
1255 mcfg->allocation[0].start_bus_number = 0;
1256 mcfg->allocation[0].end_bus_number = PCIE_MMCFG_BUS(info->mcfg_size - 1);
1258 /* MCFG is used for ECAM which can be enabled or disabled by guest.
1259 * To avoid table size changes (which create migration issues),
1260 * always create the table even if there are no allocations,
1261 * but set the signature to a reserved value in this case.
1262 * ACPI spec requires OSPMs to ignore such tables.
1264 if (info->mcfg_base == PCIE_BASE_ADDR_UNMAPPED) {
1265 /* Reserved signature: ignored by OSPM */
1266 sig = "QEMU";
1267 } else {
1268 sig = "MCFG";
1270 build_header(linker, table_data, (void *)mcfg, sig, len, 1);
1273 static void
1274 build_dmar_q35(GArray *table_data, GArray *linker)
1276 int dmar_start = table_data->len;
1278 AcpiTableDmar *dmar;
1279 AcpiDmarHardwareUnit *drhd;
1281 dmar = acpi_data_push(table_data, sizeof(*dmar));
1282 dmar->host_address_width = VTD_HOST_ADDRESS_WIDTH - 1;
1283 dmar->flags = 0; /* No intr_remap for now */
1285 /* DMAR Remapping Hardware Unit Definition structure */
1286 drhd = acpi_data_push(table_data, sizeof(*drhd));
1287 drhd->type = cpu_to_le16(ACPI_DMAR_TYPE_HARDWARE_UNIT);
1288 drhd->length = cpu_to_le16(sizeof(*drhd)); /* No device scope now */
1289 drhd->flags = ACPI_DMAR_INCLUDE_PCI_ALL;
1290 drhd->pci_segment = cpu_to_le16(0);
1291 drhd->address = cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR);
1293 build_header(linker, table_data, (void *)(table_data->data + dmar_start),
1294 "DMAR", table_data->len - dmar_start, 1);
1297 static void
1298 build_dsdt(GArray *table_data, GArray *linker, AcpiMiscInfo *misc)
1300 AcpiTableHeader *dsdt;
1302 assert(misc->dsdt_code && misc->dsdt_size);
1304 dsdt = acpi_data_push(table_data, misc->dsdt_size);
1305 memcpy(dsdt, misc->dsdt_code, misc->dsdt_size);
1307 memset(dsdt, 0, sizeof *dsdt);
1308 build_header(linker, table_data, dsdt, "DSDT",
1309 misc->dsdt_size, 1);
1312 /* Build final rsdt table */
1313 static void
1314 build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
1316 AcpiRsdtDescriptorRev1 *rsdt;
1317 size_t rsdt_len;
1318 int i;
1320 rsdt_len = sizeof(*rsdt) + sizeof(uint32_t) * table_offsets->len;
1321 rsdt = acpi_data_push(table_data, rsdt_len);
1322 memcpy(rsdt->table_offset_entry, table_offsets->data,
1323 sizeof(uint32_t) * table_offsets->len);
1324 for (i = 0; i < table_offsets->len; ++i) {
1325 /* rsdt->table_offset_entry to be filled by Guest linker */
1326 bios_linker_loader_add_pointer(linker,
1327 ACPI_BUILD_TABLE_FILE,
1328 ACPI_BUILD_TABLE_FILE,
1329 table_data, &rsdt->table_offset_entry[i],
1330 sizeof(uint32_t));
1332 build_header(linker, table_data,
1333 (void *)rsdt, "RSDT", rsdt_len, 1);
1336 static GArray *
1337 build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
1339 AcpiRsdpDescriptor *rsdp = acpi_data_push(rsdp_table, sizeof *rsdp);
1341 bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, 16,
1342 true /* fseg memory */);
1344 memcpy(&rsdp->signature, "RSD PTR ", 8);
1345 memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, 6);
1346 rsdp->rsdt_physical_address = cpu_to_le32(rsdt);
1347 /* Address to be filled by Guest linker */
1348 bios_linker_loader_add_pointer(linker, ACPI_BUILD_RSDP_FILE,
1349 ACPI_BUILD_TABLE_FILE,
1350 rsdp_table, &rsdp->rsdt_physical_address,
1351 sizeof rsdp->rsdt_physical_address);
1352 rsdp->checksum = 0;
1353 /* Checksum to be filled by Guest linker */
1354 bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
1355 rsdp, rsdp, sizeof *rsdp, &rsdp->checksum);
1357 return rsdp_table;
1360 typedef
1361 struct AcpiBuildTables {
1362 GArray *table_data;
1363 GArray *rsdp;
1364 GArray *tcpalog;
1365 GArray *linker;
1366 } AcpiBuildTables;
1368 static inline void acpi_build_tables_init(AcpiBuildTables *tables)
1370 tables->rsdp = g_array_new(false, true /* clear */, 1);
1371 tables->table_data = g_array_new(false, true /* clear */, 1);
1372 tables->tcpalog = g_array_new(false, true /* clear */, 1);
1373 tables->linker = bios_linker_loader_init();
1376 static inline void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre)
1378 void *linker_data = bios_linker_loader_cleanup(tables->linker);
1379 g_free(linker_data);
1380 g_array_free(tables->rsdp, true);
1381 g_array_free(tables->table_data, true);
1382 g_array_free(tables->tcpalog, mfre);
1385 typedef
1386 struct AcpiBuildState {
1387 /* Copy of table in RAM (for patching). */
1388 ram_addr_t table_ram;
1389 /* Is table patched? */
1390 uint8_t patched;
1391 PcGuestInfo *guest_info;
1392 void *rsdp;
1393 ram_addr_t rsdp_ram;
1394 ram_addr_t linker_ram;
1395 } AcpiBuildState;
1397 static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg)
1399 Object *pci_host;
1400 QObject *o;
1401 bool ambiguous;
1403 pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
1404 g_assert(!ambiguous);
1405 g_assert(pci_host);
1407 o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_BASE, NULL);
1408 if (!o) {
1409 return false;
1411 mcfg->mcfg_base = qint_get_int(qobject_to_qint(o));
1412 qobject_decref(o);
1414 o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_SIZE, NULL);
1415 assert(o);
1416 mcfg->mcfg_size = qint_get_int(qobject_to_qint(o));
1417 qobject_decref(o);
1418 return true;
1421 static bool acpi_has_iommu(void)
1423 bool ambiguous;
1424 Object *intel_iommu;
1426 intel_iommu = object_resolve_path_type("", TYPE_INTEL_IOMMU_DEVICE,
1427 &ambiguous);
1428 return intel_iommu && !ambiguous;
1431 static
1432 void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables)
1434 GArray *table_offsets;
1435 unsigned facs, ssdt, dsdt, rsdt;
1436 AcpiCpuInfo cpu;
1437 AcpiPmInfo pm;
1438 AcpiMiscInfo misc;
1439 AcpiMcfgInfo mcfg;
1440 PcPciInfo pci;
1441 uint8_t *u;
1442 size_t aml_len = 0;
1443 GArray *tables_blob = tables->table_data;
1445 acpi_get_cpu_info(&cpu);
1446 acpi_get_pm_info(&pm);
1447 acpi_get_dsdt(&misc);
1448 acpi_get_misc_info(&misc);
1449 acpi_get_pci_info(&pci);
1451 table_offsets = g_array_new(false, true /* clear */,
1452 sizeof(uint32_t));
1453 ACPI_BUILD_DPRINTF("init ACPI tables\n");
1455 bios_linker_loader_alloc(tables->linker, ACPI_BUILD_TABLE_FILE,
1456 64 /* Ensure FACS is aligned */,
1457 false /* high memory */);
1460 * FACS is pointed to by FADT.
1461 * We place it first since it's the only table that has alignment
1462 * requirements.
1464 facs = tables_blob->len;
1465 build_facs(tables_blob, tables->linker, guest_info);
1467 /* DSDT is pointed to by FADT */
1468 dsdt = tables_blob->len;
1469 build_dsdt(tables_blob, tables->linker, &misc);
1471 /* Count the size of the DSDT and SSDT, we will need it for legacy
1472 * sizing of ACPI tables.
1474 aml_len += tables_blob->len - dsdt;
1476 /* ACPI tables pointed to by RSDT */
1477 acpi_add_table(table_offsets, tables_blob);
1478 build_fadt(tables_blob, tables->linker, &pm, facs, dsdt);
1480 ssdt = tables_blob->len;
1481 acpi_add_table(table_offsets, tables_blob);
1482 build_ssdt(tables_blob, tables->linker, &cpu, &pm, &misc, &pci,
1483 guest_info);
1484 aml_len += tables_blob->len - ssdt;
1486 acpi_add_table(table_offsets, tables_blob);
1487 build_madt(tables_blob, tables->linker, &cpu, guest_info);
1489 if (misc.has_hpet) {
1490 acpi_add_table(table_offsets, tables_blob);
1491 build_hpet(tables_blob, tables->linker);
1493 if (misc.has_tpm) {
1494 acpi_add_table(table_offsets, tables_blob);
1495 build_tpm_tcpa(tables_blob, tables->linker, tables->tcpalog);
1497 acpi_add_table(table_offsets, tables_blob);
1498 build_tpm_ssdt(tables_blob, tables->linker);
1500 if (guest_info->numa_nodes) {
1501 acpi_add_table(table_offsets, tables_blob);
1502 build_srat(tables_blob, tables->linker, guest_info);
1504 if (acpi_get_mcfg(&mcfg)) {
1505 acpi_add_table(table_offsets, tables_blob);
1506 build_mcfg_q35(tables_blob, tables->linker, &mcfg);
1508 if (acpi_has_iommu()) {
1509 acpi_add_table(table_offsets, tables_blob);
1510 build_dmar_q35(tables_blob, tables->linker);
1513 /* Add tables supplied by user (if any) */
1514 for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
1515 unsigned len = acpi_table_len(u);
1517 acpi_add_table(table_offsets, tables_blob);
1518 g_array_append_vals(tables_blob, u, len);
1521 /* RSDT is pointed to by RSDP */
1522 rsdt = tables_blob->len;
1523 build_rsdt(tables_blob, tables->linker, table_offsets);
1525 /* RSDP is in FSEG memory, so allocate it separately */
1526 build_rsdp(tables->rsdp, tables->linker, rsdt);
1528 /* We'll expose it all to Guest so we want to reduce
1529 * chance of size changes.
1531 * We used to align the tables to 4k, but of course this would be
1532 * too simple to be enough. 4k turned out to be too small an
1533 * alignment very soon, and in fact it is almost impossible to
1534 * keep the table size stable for all (max_cpus, max_memory_slots)
1535 * combinations. So the table size is always 64k for pc-i440fx-2.1
1536 * and we give an error if the table grows beyond that limit.
1538 * We still have the problem of migrating from "-M pc-i440fx-2.0". For
1539 * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables
1540 * than 2.0 and we can always pad the smaller tables with zeros. We can
1541 * then use the exact size of the 2.0 tables.
1543 * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration.
1545 if (guest_info->legacy_acpi_table_size) {
1546 /* Subtracting aml_len gives the size of fixed tables. Then add the
1547 * size of the PIIX4 DSDT/SSDT in QEMU 2.0.
1549 int legacy_aml_len =
1550 guest_info->legacy_acpi_table_size +
1551 ACPI_BUILD_LEGACY_CPU_AML_SIZE * max_cpus;
1552 int legacy_table_size =
1553 ROUND_UP(tables_blob->len - aml_len + legacy_aml_len,
1554 ACPI_BUILD_ALIGN_SIZE);
1555 if (tables_blob->len > legacy_table_size) {
1556 /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */
1557 error_report("Warning: migration may not work.");
1559 g_array_set_size(tables_blob, legacy_table_size);
1560 } else {
1561 /* Make sure we have a buffer in case we need to resize the tables. */
1562 if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) {
1563 /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */
1564 error_report("Warning: ACPI tables are larger than 64k.");
1565 error_report("Warning: migration may not work.");
1566 error_report("Warning: please remove CPUs, NUMA nodes, "
1567 "memory slots or PCI bridges.");
1569 acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE);
1572 acpi_align_size(tables->linker, ACPI_BUILD_ALIGN_SIZE);
1574 /* Cleanup memory that's no longer used. */
1575 g_array_free(table_offsets, true);
1578 static void acpi_ram_update(ram_addr_t ram, GArray *data)
1580 uint32_t size = acpi_data_len(data);
1582 /* Make sure RAM size is correct - in case it got changed e.g. by migration */
1583 qemu_ram_resize(ram, size, &error_abort);
1585 memcpy(qemu_get_ram_ptr(ram), data->data, size);
1586 cpu_physical_memory_set_dirty_range_nocode(ram, size);
1589 static void acpi_build_update(void *build_opaque, uint32_t offset)
1591 AcpiBuildState *build_state = build_opaque;
1592 AcpiBuildTables tables;
1594 /* No state to update or already patched? Nothing to do. */
1595 if (!build_state || build_state->patched) {
1596 return;
1598 build_state->patched = 1;
1600 acpi_build_tables_init(&tables);
1602 acpi_build(build_state->guest_info, &tables);
1604 acpi_ram_update(build_state->table_ram, tables.table_data);
1606 if (build_state->rsdp) {
1607 memcpy(build_state->rsdp, tables.rsdp->data, acpi_data_len(tables.rsdp));
1608 } else {
1609 acpi_ram_update(build_state->rsdp_ram, tables.rsdp);
1612 acpi_ram_update(build_state->linker_ram, tables.linker);
1613 acpi_build_tables_cleanup(&tables, true);
1616 static void acpi_build_reset(void *build_opaque)
1618 AcpiBuildState *build_state = build_opaque;
1619 build_state->patched = 0;
1622 static ram_addr_t acpi_add_rom_blob(AcpiBuildState *build_state, GArray *blob,
1623 const char *name, uint64_t max_size)
1625 return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
1626 name, acpi_build_update, build_state);
1629 static const VMStateDescription vmstate_acpi_build = {
1630 .name = "acpi_build",
1631 .version_id = 1,
1632 .minimum_version_id = 1,
1633 .fields = (VMStateField[]) {
1634 VMSTATE_UINT8(patched, AcpiBuildState),
1635 VMSTATE_END_OF_LIST()
1639 void acpi_setup(PcGuestInfo *guest_info)
1641 AcpiBuildTables tables;
1642 AcpiBuildState *build_state;
1644 if (!guest_info->fw_cfg) {
1645 ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n");
1646 return;
1649 if (!guest_info->has_acpi_build) {
1650 ACPI_BUILD_DPRINTF("ACPI build disabled. Bailing out.\n");
1651 return;
1654 if (!acpi_enabled) {
1655 ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n");
1656 return;
1659 build_state = g_malloc0(sizeof *build_state);
1661 build_state->guest_info = guest_info;
1663 acpi_set_pci_info();
1665 acpi_build_tables_init(&tables);
1666 acpi_build(build_state->guest_info, &tables);
1668 /* Now expose it all to Guest */
1669 build_state->table_ram = acpi_add_rom_blob(build_state, tables.table_data,
1670 ACPI_BUILD_TABLE_FILE,
1671 ACPI_BUILD_TABLE_MAX_SIZE);
1672 assert(build_state->table_ram != RAM_ADDR_MAX);
1674 build_state->linker_ram =
1675 acpi_add_rom_blob(build_state, tables.linker, "etc/table-loader", 0);
1677 fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
1678 tables.tcpalog->data, acpi_data_len(tables.tcpalog));
1680 if (!guest_info->rsdp_in_ram) {
1682 * Keep for compatibility with old machine types.
1683 * Though RSDP is small, its contents aren't immutable, so
1684 * we'll update it along with the rest of tables on guest access.
1686 uint32_t rsdp_size = acpi_data_len(tables.rsdp);
1688 build_state->rsdp = g_memdup(tables.rsdp->data, rsdp_size);
1689 fw_cfg_add_file_callback(guest_info->fw_cfg, ACPI_BUILD_RSDP_FILE,
1690 acpi_build_update, build_state,
1691 build_state->rsdp, rsdp_size);
1692 build_state->rsdp_ram = (ram_addr_t)-1;
1693 } else {
1694 build_state->rsdp = NULL;
1695 build_state->rsdp_ram = acpi_add_rom_blob(build_state, tables.rsdp,
1696 ACPI_BUILD_RSDP_FILE, 0);
1699 qemu_register_reset(acpi_build_reset, build_state);
1700 acpi_build_reset(build_state);
1701 vmstate_register(NULL, 0, &vmstate_acpi_build, build_state);
1703 /* Cleanup tables but don't free the memory: we track it
1704 * in build_state.
1706 acpi_build_tables_cleanup(&tables, false);