pc: acpi-build: generate pvpanic device description dynamically

/* Support for generating ACPI tables and passing them to Guests
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "acpi-build.h"
#include <stddef.h>
#include <glib.h>
#include "qemu-common.h"
#include "qemu/bitmap.h"
#include "qemu/osdep.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "hw/pci/pci.h"
#include "qom/cpu.h"
#include "hw/i386/pc.h"
#include "target-i386/cpu.h"
#include "hw/timer/hpet.h"
#include "hw/i386/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/isa/isa.h"
#include "hw/acpi/memory_hotplug.h"
#include "sysemu/tpm.h"
#include "hw/acpi/tpm.h"

/* Supported chipsets: */
#include "hw/acpi/piix4.h"
#include "hw/acpi/pcihp.h"
#include "hw/i386/ich9.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci-host/q35.h"
#include "hw/i386/intel_iommu.h"

#include "hw/i386/q35-acpi-dsdt.hex"
#include "hw/i386/acpi-dsdt.hex"

#include "hw/acpi/aml-build.h"

#include "qapi/qmp/qint.h"
#include "qom/qom-qobject.h"
#include "exec/ram_addr.h"

/* These are used to size the ACPI tables for -M pc-i440fx-1.7 and
 * -M pc-i440fx-2.0.  Even if the actual amount of AML generated grows
 * a little bit, there should be plenty of free space since the DSDT
 * shrunk by ~1.5k between QEMU 2.0 and QEMU 2.1.
 */
#define ACPI_BUILD_LEGACY_CPU_AML_SIZE    97
#define ACPI_BUILD_ALIGN_SIZE             0x1000

#define ACPI_BUILD_TABLE_SIZE             0x20000

/* Reserve RAM space for tables: add another order of magnitude. */
#define ACPI_BUILD_TABLE_MAX_SIZE         0x200000

/* #define DEBUG_ACPI_BUILD */
#ifdef DEBUG_ACPI_BUILD
#define ACPI_BUILD_DPRINTF(fmt, ...)        \
    do { printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0)
#else
#define ACPI_BUILD_DPRINTF(fmt, ...)
#endif

typedef struct AcpiCpuInfo {
    DECLARE_BITMAP(found_cpus, ACPI_CPU_HOTPLUG_ID_LIMIT);
} AcpiCpuInfo;

typedef struct AcpiMcfgInfo {
    uint64_t mcfg_base;
    uint32_t mcfg_size;
} AcpiMcfgInfo;

typedef struct AcpiPmInfo {
    bool s3_disabled;
    bool s4_disabled;
    bool pcihp_bridge_en;
    uint8_t s4_val;
    uint16_t sci_int;
    uint8_t acpi_enable_cmd;
    uint8_t acpi_disable_cmd;
    uint32_t gpe0_blk;
    uint32_t gpe0_blk_len;
    uint32_t io_base;
} AcpiPmInfo;

typedef struct AcpiMiscInfo {
    bool has_hpet;
    bool has_tpm;
    DECLARE_BITMAP(slot_hotplug_enable, PCI_SLOT_MAX);
    const unsigned char *dsdt_code;
    unsigned dsdt_size;
    uint16_t pvpanic_port;
} AcpiMiscInfo;

typedef struct AcpiBuildPciBusHotplugState {
    GArray *device_table;
    GArray *notify_table;
    struct AcpiBuildPciBusHotplugState *parent;
    bool pcihp_bridge_en;
} AcpiBuildPciBusHotplugState;

static void acpi_get_dsdt(AcpiMiscInfo *info)
{
    uint16_t *applesmc_sta;
    Object *piix = piix4_pm_find();
    Object *lpc = ich9_lpc_find();
    assert(!!piix != !!lpc);

    if (piix) {
        info->dsdt_code = AcpiDsdtAmlCode;
        info->dsdt_size = sizeof AcpiDsdtAmlCode;
        applesmc_sta = piix_dsdt_applesmc_sta;
    }
    if (lpc) {
        info->dsdt_code = Q35AcpiDsdtAmlCode;
        info->dsdt_size = sizeof Q35AcpiDsdtAmlCode;
        applesmc_sta = q35_dsdt_applesmc_sta;
    }

    /* Patch in appropriate value for AppleSMC _STA */
    *(uint8_t *)(info->dsdt_code + *applesmc_sta) =
        applesmc_find() ? 0x0b : 0x00;
}

static
int acpi_add_cpu_info(Object *o, void *opaque)
{
    AcpiCpuInfo *cpu = opaque;
    uint64_t apic_id;

    if (object_dynamic_cast(o, TYPE_CPU)) {
        apic_id = object_property_get_int(o, "apic-id", NULL);
        assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);

        set_bit(apic_id, cpu->found_cpus);
    }

    object_child_foreach(o, acpi_add_cpu_info, opaque);
    return 0;
}

static void acpi_get_cpu_info(AcpiCpuInfo *cpu)
{
    Object *root = object_get_root();

    memset(cpu->found_cpus, 0, sizeof cpu->found_cpus);
    object_child_foreach(root, acpi_add_cpu_info, cpu);
}

static void acpi_get_pm_info(AcpiPmInfo *pm)
{
    Object *piix = piix4_pm_find();
    Object *lpc = ich9_lpc_find();
    Object *obj = NULL;
    QObject *o;

    if (piix) {
        obj = piix;
    }
    if (lpc) {
        obj = lpc;
    }
    assert(obj);

    /* Fill in optional s3/s4 related properties */
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
    if (o) {
        pm->s3_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s3_disabled = false;
    }
    qobject_decref(o);
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_DISABLED, NULL);
    if (o) {
        pm->s4_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_disabled = false;
    }
    qobject_decref(o);
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_VAL, NULL);
    if (o) {
        pm->s4_val = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_val = false;
    }
    qobject_decref(o);

    /* Fill in mandatory properties */
    pm->sci_int = object_property_get_int(obj, ACPI_PM_PROP_SCI_INT, NULL);

    pm->acpi_enable_cmd = object_property_get_int(obj,
                                                  ACPI_PM_PROP_ACPI_ENABLE_CMD,
                                                  NULL);
    pm->acpi_disable_cmd = object_property_get_int(obj,
                                                   ACPI_PM_PROP_ACPI_DISABLE_CMD,
                                                   NULL);
    pm->io_base = object_property_get_int(obj, ACPI_PM_PROP_PM_IO_BASE,
                                          NULL);
    pm->gpe0_blk = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK,
                                           NULL);
    pm->gpe0_blk_len = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK_LEN,
                                               NULL);
    pm->pcihp_bridge_en =
        object_property_get_bool(obj, "acpi-pci-hotplug-with-bridge-support",
                                 NULL);
}

static void acpi_get_misc_info(AcpiMiscInfo *info)
{
    info->has_hpet = hpet_find();
    info->has_tpm = tpm_find();
    info->pvpanic_port = pvpanic_port();
}

static void acpi_get_pci_info(PcPciInfo *info)
{
    Object *pci_host;
    bool ambiguous;

    pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
    g_assert(!ambiguous);
    g_assert(pci_host);

    info->w32.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE_START,
                                              NULL);
    info->w32.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE_END,
                                            NULL);
    info->w64.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE64_START,
                                              NULL);
    info->w64.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE64_END,
                                            NULL);
}

#define ACPI_BUILD_APPNAME  "Bochs"
#define ACPI_BUILD_APPNAME6 "BOCHS "
#define ACPI_BUILD_APPNAME4 "BXPC"

#define ACPI_BUILD_TABLE_FILE "etc/acpi/tables"
#define ACPI_BUILD_RSDP_FILE "etc/acpi/rsdp"
#define ACPI_BUILD_TPMLOG_FILE "etc/tpm/log"

static void
build_header(GArray *linker, GArray *table_data,
             AcpiTableHeader *h, const char *sig, int len, uint8_t rev)
{
    memcpy(&h->signature, sig, 4);
    h->length = cpu_to_le32(len);
    h->revision = rev;
    memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6);
    memcpy(h->oem_table_id, ACPI_BUILD_APPNAME4, 4);
    memcpy(h->oem_table_id + 4, sig, 4);
    h->oem_revision = cpu_to_le32(1);
    memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME4, 4);
    h->asl_compiler_revision = cpu_to_le32(1);
    h->checksum = 0;
    /* Checksum to be filled in by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE,
                                    table_data->data, h, len, &h->checksum);
}

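/* Helpers that emit raw AML byte code for the SSDT fragments not yet
 * converted to the aml-build API (CPU and PCI hotplug): build_alloc_method()
 * starts the body of a Method(name, arg_count), and
 * build_append_and_cleanup_method() wraps that body in MethodOp + PkgLength
 * and appends it to the given device/scope buffer.
 */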
static GArray *build_alloc_method(const char *name, uint8_t arg_count)
{
    GArray *method = build_alloc_array();

    build_append_namestring(method, "%s", name);
    build_append_byte(method, arg_count); /* MethodFlags: ArgCount */

    return method;
}

static void build_append_and_cleanup_method(GArray *device, GArray *method)
{
    uint8_t op = 0x14; /* MethodOp */

    build_package(method, op);

    build_append_array(device, method);
    build_free_array(method);
}

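/* Emits AML roughly equivalent to the ASL fragment
 *     If (LEqual (Arg0, value)) {
 *         Notify (<target_name>, Arg1)
 *     }
 * which build_append_notify_method() below strings together for each index.
 */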
static void build_append_notify_target_ifequal(GArray *method,
                                               GArray *target_name,
                                               uint32_t value)
{
    GArray *notify = build_alloc_array();
    uint8_t op = 0xA0; /* IfOp */

    build_append_byte(notify, 0x93); /* LEqualOp */
    build_append_byte(notify, 0x68); /* Arg0Op */
    build_append_int(notify, value);
    build_append_byte(notify, 0x86); /* NotifyOp */
    build_append_array(notify, target_name);
    build_append_byte(notify, 0x69); /* Arg1Op */

    /* Pack it up */
    build_package(notify, op);

    build_append_array(method, notify);

    build_free_array(notify);
}

/* End here */
#define ACPI_PORT_SMI_CMD           0x00b2 /* TODO: this is APM_CNT_IOPORT */

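/* Reserve 'size' bytes at the end of the table blob and return a pointer to
 * the newly added space (the GArray may reallocate, so the pointer is only
 * valid until the next push).
 */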
static inline void *acpi_data_push(GArray *table_data, unsigned size)
{
    unsigned off = table_data->len;
    g_array_set_size(table_data, off + size);
    return table_data->data + off;
}

static unsigned acpi_data_len(GArray *table)
{
#if GLIB_CHECK_VERSION(2, 22, 0)
    assert(g_array_get_element_size(table) == 1);
#endif
    return table->len;
}

static void acpi_align_size(GArray *blob, unsigned align)
{
    /* Align size to multiple of given size. This reduces the chance
     * we need to change size in the future (breaking cross version migration).
     */
    g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
}

/* Set a value within table in a safe manner */
#define ACPI_BUILD_SET_LE(table, size, off, bits, val) \
    do { \
        uint64_t ACPI_BUILD_SET_LE_val = cpu_to_le64(val); \
        memcpy(acpi_data_get_ptr(table, size, off, \
                                 (bits) / BITS_PER_BYTE), \
               &ACPI_BUILD_SET_LE_val, \
               (bits) / BITS_PER_BYTE); \
    } while (0)

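/* Typical use: patch a value into a pre-compiled AML blob at an offset
 * exported by the ASL compiler, e.g.
 *     ACPI_BUILD_SET_LE(ssdt_ptr, sizeof(ssdp_misc_aml),
 *                       acpi_pci32_start[0], 32, pci->w32.begin);
 */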
static inline void *acpi_data_get_ptr(uint8_t *table_data, unsigned table_size,
                                      unsigned off, unsigned size)
{
    assert(off + size > off);
    assert(off + size <= table_size);
    return table_data + off;
}

static inline void acpi_add_table(GArray *table_offsets, GArray *table_data)
{
    uint32_t offset = cpu_to_le32(table_data->len);
    g_array_append_val(table_offsets, offset);
}

/* FACS */
static void
build_facs(GArray *table_data, GArray *linker, PcGuestInfo *guest_info)
{
    AcpiFacsDescriptorRev1 *facs = acpi_data_push(table_data, sizeof *facs);
    memcpy(&facs->signature, "FACS", 4);
    facs->length = cpu_to_le32(sizeof(*facs));
}

/* Load chipset information in FADT */
static void fadt_setup(AcpiFadtDescriptorRev1 *fadt, AcpiPmInfo *pm)
{
    fadt->model = 1;
    fadt->reserved1 = 0;
    fadt->sci_int = cpu_to_le16(pm->sci_int);
    fadt->smi_cmd = cpu_to_le32(ACPI_PORT_SMI_CMD);
    fadt->acpi_enable = pm->acpi_enable_cmd;
    fadt->acpi_disable = pm->acpi_disable_cmd;
    /* EVT, CNT, TMR offset matches hw/acpi/core.c */
    fadt->pm1a_evt_blk = cpu_to_le32(pm->io_base);
    fadt->pm1a_cnt_blk = cpu_to_le32(pm->io_base + 0x04);
    fadt->pm_tmr_blk = cpu_to_le32(pm->io_base + 0x08);
    fadt->gpe0_blk = cpu_to_le32(pm->gpe0_blk);
    /* EVT, CNT, TMR length matches hw/acpi/core.c */
    fadt->pm1_evt_len = 4;
    fadt->pm1_cnt_len = 2;
    fadt->pm_tmr_len = 4;
    fadt->gpe0_blk_len = pm->gpe0_blk_len;
    fadt->plvl2_lat = cpu_to_le16(0xfff); /* C2 state not supported */
    fadt->plvl3_lat = cpu_to_le16(0xfff); /* C3 state not supported */
    fadt->flags = cpu_to_le32((1 << ACPI_FADT_F_WBINVD) |
                              (1 << ACPI_FADT_F_PROC_C1) |
                              (1 << ACPI_FADT_F_SLP_BUTTON) |
                              (1 << ACPI_FADT_F_RTC_S4));
    fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_USE_PLATFORM_CLOCK);
    /* APIC destination mode ("Flat Logical") has an upper limit of 8 CPUs.
     * For more than 8 CPUs, "Clustered Logical" mode has to be used.
     */
    if (max_cpus > 8) {
        fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_FORCE_APIC_CLUSTER_MODEL);
    }
}

/* FADT */
static void
build_fadt(GArray *table_data, GArray *linker, AcpiPmInfo *pm,
           unsigned facs, unsigned dsdt)
{
    AcpiFadtDescriptorRev1 *fadt = acpi_data_push(table_data, sizeof(*fadt));

    fadt->firmware_ctrl = cpu_to_le32(facs);
    /* FACS address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   table_data, &fadt->firmware_ctrl,
                                   sizeof fadt->firmware_ctrl);

    fadt->dsdt = cpu_to_le32(dsdt);
    /* DSDT address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   table_data, &fadt->dsdt,
                                   sizeof fadt->dsdt);

    fadt_setup(fadt, pm);

    build_header(linker, table_data,
                 (void *)fadt, "FACP", sizeof(*fadt), 1);
}

static void
build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu,
           PcGuestInfo *guest_info)
{
    int madt_start = table_data->len;

    AcpiMultipleApicTable *madt;
    AcpiMadtIoApic *io_apic;
    AcpiMadtIntsrcovr *intsrcovr;
    AcpiMadtLocalNmi *local_nmi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);
    madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS);
    madt->flags = cpu_to_le32(1);

    for (i = 0; i < guest_info->apic_id_limit; i++) {
        AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic);
        apic->type = ACPI_APIC_PROCESSOR;
        apic->length = sizeof(*apic);
        apic->processor_id = i;
        apic->local_apic_id = i;
        if (test_bit(i, cpu->found_cpus)) {
            apic->flags = cpu_to_le32(1);
        } else {
            apic->flags = cpu_to_le32(0);
        }
    }
    io_apic = acpi_data_push(table_data, sizeof *io_apic);
    io_apic->type = ACPI_APIC_IO;
    io_apic->length = sizeof(*io_apic);
#define ACPI_BUILD_IOAPIC_ID 0x0
    io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID;
    io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS);
    io_apic->interrupt = cpu_to_le32(0);

    if (guest_info->apic_xrupt_override) {
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type   = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = 0;
        intsrcovr->gsi    = cpu_to_le32(2);
        intsrcovr->flags  = cpu_to_le16(0); /* conforms to bus specifications */
    }
    for (i = 1; i < 16; i++) {
#define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11))
        if (!(ACPI_BUILD_PCI_IRQS & (1 << i))) {
            /* No need for an INT source override structure. */
            continue;
        }
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type   = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = i;
        intsrcovr->gsi    = cpu_to_le32(i);
        intsrcovr->flags  = cpu_to_le16(0xd); /* active high, level triggered */
    }

    local_nmi = acpi_data_push(table_data, sizeof *local_nmi);
    local_nmi->type         = ACPI_APIC_LOCAL_NMI;
    local_nmi->length       = sizeof(*local_nmi);
    local_nmi->processor_id = 0xff; /* all processors */
    local_nmi->flags        = cpu_to_le16(0);
    local_nmi->lint         = 1; /* ACPI_LINT1 */

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 1);
}

/* Encode a hex value */
static inline char acpi_get_hex(uint32_t val)
{
    val &= 0x0f;
    return (val <= 9) ? ('0' + val) : ('A' + val - 10);
}

#include "hw/i386/ssdt-proc.hex"

/* 0x5B 0x83 ProcessorOp PkgLength NameString ProcID */
#define ACPI_PROC_OFFSET_CPUHEX (*ssdt_proc_name - *ssdt_proc_start + 2)
#define ACPI_PROC_OFFSET_CPUID1 (*ssdt_proc_name - *ssdt_proc_start + 4)
#define ACPI_PROC_OFFSET_CPUID2 (*ssdt_proc_id - *ssdt_proc_start)
#define ACPI_PROC_SIZEOF (*ssdt_proc_end - *ssdt_proc_start)
#define ACPI_PROC_AML (ssdp_proc_aml + *ssdt_proc_start)

/* 0x5B 0x82 DeviceOp PkgLength NameString */
#define ACPI_PCIHP_OFFSET_HEX (*ssdt_pcihp_name - *ssdt_pcihp_start + 1)
#define ACPI_PCIHP_OFFSET_ID (*ssdt_pcihp_id - *ssdt_pcihp_start)
#define ACPI_PCIHP_OFFSET_ADR (*ssdt_pcihp_adr - *ssdt_pcihp_start)
#define ACPI_PCIHP_OFFSET_EJ0 (*ssdt_pcihp_ej0 - *ssdt_pcihp_start)
#define ACPI_PCIHP_SIZEOF (*ssdt_pcihp_end - *ssdt_pcihp_start)
#define ACPI_PCIHP_AML (ssdp_pcihp_aml + *ssdt_pcihp_start)

#define ACPI_PCINOHP_OFFSET_HEX (*ssdt_pcinohp_name - *ssdt_pcinohp_start + 1)
#define ACPI_PCINOHP_OFFSET_ADR (*ssdt_pcinohp_adr - *ssdt_pcinohp_start)
#define ACPI_PCINOHP_SIZEOF (*ssdt_pcinohp_end - *ssdt_pcinohp_start)
#define ACPI_PCINOHP_AML (ssdp_pcihp_aml + *ssdt_pcinohp_start)

#define ACPI_PCIVGA_OFFSET_HEX (*ssdt_pcivga_name - *ssdt_pcivga_start + 1)
#define ACPI_PCIVGA_OFFSET_ADR (*ssdt_pcivga_adr - *ssdt_pcivga_start)
#define ACPI_PCIVGA_SIZEOF (*ssdt_pcivga_end - *ssdt_pcivga_start)
#define ACPI_PCIVGA_AML (ssdp_pcihp_aml + *ssdt_pcivga_start)

#define ACPI_PCIQXL_OFFSET_HEX (*ssdt_pciqxl_name - *ssdt_pciqxl_start + 1)
#define ACPI_PCIQXL_OFFSET_ADR (*ssdt_pciqxl_adr - *ssdt_pciqxl_start)
#define ACPI_PCIQXL_SIZEOF (*ssdt_pciqxl_end - *ssdt_pciqxl_start)
#define ACPI_PCIQXL_AML (ssdp_pcihp_aml + *ssdt_pciqxl_start)

#include "hw/i386/ssdt-mem.hex"

/* 0x5B 0x82 DeviceOp PkgLength NameString DimmID */
#define ACPI_MEM_OFFSET_HEX (*ssdt_mem_name - *ssdt_mem_start + 2)
#define ACPI_MEM_OFFSET_ID (*ssdt_mem_id - *ssdt_mem_start + 7)
#define ACPI_MEM_SIZEOF (*ssdt_mem_end - *ssdt_mem_start)
#define ACPI_MEM_AML (ssdm_mem_aml + *ssdt_mem_start)

#define ACPI_SSDT_SIGNATURE 0x54445353 /* SSDT */
#define ACPI_SSDT_HEADER_LENGTH 36

#include "hw/i386/ssdt-misc.hex"
#include "hw/i386/ssdt-pcihp.hex"
#include "hw/i386/ssdt-tpm.hex"

static void
build_append_notify_method(GArray *device, const char *name,
                           const char *format, int count)
{
    int i;
    GArray *method = build_alloc_method(name, 2);

    for (i = 0; i < count; i++) {
        GArray *target = build_alloc_array();
        build_append_namestring(target, format, i);
        assert(i < 256); /* Fits in 1 byte */
        build_append_notify_target_ifequal(method, target, i);
        build_free_array(target);
    }

    build_append_and_cleanup_method(device, method);
}

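/* The patch_pci*() helpers below rewrite per-slot fields of the pre-compiled
 * ssdt-pcihp templates: the two hex digits of the device name (e.g. devfn
 * 0x08 for slot 1 gives a device named "S08"), the slot number constant
 * (_SUN in the hotplug template) where present, and the byte of the _ADR
 * constant that holds the slot (device) part of the address.
 */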
static void patch_pcihp(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCIHP_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCIHP_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCIHP_OFFSET_ID] = slot;
    ssdt_ptr[ACPI_PCIHP_OFFSET_ADR + 2] = slot;
}

static void patch_pcinohp(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCINOHP_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCINOHP_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCINOHP_OFFSET_ADR + 2] = slot;
}

static void patch_pcivga(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCIVGA_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCIVGA_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCIVGA_OFFSET_ADR + 2] = slot;
}

static void patch_pciqxl(int slot, uint8_t *ssdt_ptr)
{
    unsigned devfn = PCI_DEVFN(slot, 0);

    ssdt_ptr[ACPI_PCIQXL_OFFSET_HEX] = acpi_get_hex(devfn >> 4);
    ssdt_ptr[ACPI_PCIQXL_OFFSET_HEX + 1] = acpi_get_hex(devfn);
    ssdt_ptr[ACPI_PCIQXL_OFFSET_ADR + 2] = slot;
}

/* Assign BSEL property to all buses.  In the future, this can be changed
 * to only assign to buses that support hotplug.
 */
static void *acpi_set_bsel(PCIBus *bus, void *opaque)
{
    unsigned *bsel_alloc = opaque;
    unsigned *bus_bsel;

    if (qbus_is_hotpluggable(BUS(bus))) {
        bus_bsel = g_malloc(sizeof *bus_bsel);

        *bus_bsel = (*bsel_alloc)++;
        object_property_add_uint32_ptr(OBJECT(bus), ACPI_PCIHP_PROP_BSEL,
                                       bus_bsel, NULL);
    }

    return bsel_alloc;
}

static void acpi_set_pci_info(void)
{
    PCIBus *bus = find_i440fx(); /* TODO: Q35 support */
    unsigned bsel_alloc = 0;

    if (bus) {
        /* Scan all PCI buses. Set property to enable acpi based hotplug. */
        pci_for_each_bus_depth_first(bus, acpi_set_bsel, NULL, &bsel_alloc);
    }
}

static void build_pci_bus_state_init(AcpiBuildPciBusHotplugState *state,
                                     AcpiBuildPciBusHotplugState *parent,
                                     bool pcihp_bridge_en)
{
    state->parent = parent;
    state->device_table = build_alloc_array();
    state->notify_table = build_alloc_array();
    state->pcihp_bridge_en = pcihp_bridge_en;
}

static void build_pci_bus_state_cleanup(AcpiBuildPciBusHotplugState *state)
{
    build_free_array(state->device_table);
    build_free_array(state->notify_table);
}

static void *build_pci_bus_begin(PCIBus *bus, void *parent_state)
{
    AcpiBuildPciBusHotplugState *parent = parent_state;
    AcpiBuildPciBusHotplugState *child = g_malloc(sizeof *child);

    build_pci_bus_state_init(child, parent, parent->pcihp_bridge_en);

    return child;
}

static void build_pci_bus_end(PCIBus *bus, void *bus_state)
{
    AcpiBuildPciBusHotplugState *child = bus_state;
    AcpiBuildPciBusHotplugState *parent = child->parent;
    GArray *bus_table = build_alloc_array();
    DECLARE_BITMAP(slot_hotplug_enable, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_present, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_system, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_vga, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_qxl, PCI_SLOT_MAX);
    uint8_t op;
    int i;
    QObject *bsel;
    GArray *method;
    bool bus_hotplug_support = false;

    /*
     * Skip bridge subtree creation if bridge hotplug is disabled
     * to make acpi tables compatible with legacy machine types.
     * Skip creation for hotplugged bridges as well.
     */
    if (bus->parent_dev && (!child->pcihp_bridge_en ||
                            DEVICE(bus->parent_dev)->hotplugged)) {
        build_free_array(bus_table);
        build_pci_bus_state_cleanup(child);
        g_free(child);
        return;
    }

    if (bus->parent_dev) {
        op = 0x82; /* DeviceOp */
        build_append_namestring(bus_table, "S%.02X",
                                bus->parent_dev->devfn);
        build_append_byte(bus_table, 0x08); /* NameOp */
        build_append_namestring(bus_table, "_SUN");
        build_append_int(bus_table, PCI_SLOT(bus->parent_dev->devfn));
        build_append_byte(bus_table, 0x08); /* NameOp */
        build_append_namestring(bus_table, "_ADR");
        build_append_int(bus_table, (PCI_SLOT(bus->parent_dev->devfn) << 16) |
                         PCI_FUNC(bus->parent_dev->devfn));
    } else {
        op = 0x10; /* ScopeOp */
        build_append_namestring(bus_table, "PCI0");
    }

    bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL);
    if (bsel) {
        build_append_byte(bus_table, 0x08); /* NameOp */
        build_append_namestring(bus_table, "BSEL");
        build_append_int(bus_table, qint_get_int(qobject_to_qint(bsel)));
        memset(slot_hotplug_enable, 0xff, sizeof slot_hotplug_enable);
    } else {
        /* No bsel - no slots are hot-pluggable */
        memset(slot_hotplug_enable, 0x00, sizeof slot_hotplug_enable);
    }

    memset(slot_device_present, 0x00, sizeof slot_device_present);
    memset(slot_device_system, 0x00, sizeof slot_device_present);
    memset(slot_device_vga, 0x00, sizeof slot_device_vga);
    memset(slot_device_qxl, 0x00, sizeof slot_device_qxl);

    for (i = 0; i < ARRAY_SIZE(bus->devices); i += PCI_FUNC_MAX) {
        DeviceClass *dc;
        PCIDeviceClass *pc;
        PCIDevice *pdev = bus->devices[i];
        int slot = PCI_SLOT(i);
        bool bridge_in_acpi;

        if (!pdev) {
            continue;
        }

        set_bit(slot, slot_device_present);
        pc = PCI_DEVICE_GET_CLASS(pdev);
        dc = DEVICE_GET_CLASS(pdev);

        /* When hotplug for bridges is enabled, bridges are
         * described in ACPI separately (see build_pci_bus_end).
         * In this case they aren't themselves hot-pluggable.
         * Hotplugged bridges *are* hot-pluggable.
         */
        bridge_in_acpi = pc->is_bridge && child->pcihp_bridge_en &&
            !DEVICE(pdev)->hotplugged;

        if (pc->class_id == PCI_CLASS_BRIDGE_ISA || bridge_in_acpi) {
            set_bit(slot, slot_device_system);
        }

        if (pc->class_id == PCI_CLASS_DISPLAY_VGA) {
            set_bit(slot, slot_device_vga);

            if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) {
                set_bit(slot, slot_device_qxl);
            }
        }

        if (!dc->hotpluggable || bridge_in_acpi) {
            clear_bit(slot, slot_hotplug_enable);
        }
    }

    /* Append Device object for each slot */
    for (i = 0; i < PCI_SLOT_MAX; i++) {
        bool can_eject = test_bit(i, slot_hotplug_enable);
        bool present = test_bit(i, slot_device_present);
        bool vga = test_bit(i, slot_device_vga);
        bool qxl = test_bit(i, slot_device_qxl);
        bool system = test_bit(i, slot_device_system);
        if (can_eject) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCIHP_SIZEOF);
            memcpy(pcihp, ACPI_PCIHP_AML, ACPI_PCIHP_SIZEOF);
            patch_pcihp(i, pcihp);
            bus_hotplug_support = true;
        } else if (qxl) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCIQXL_SIZEOF);
            memcpy(pcihp, ACPI_PCIQXL_AML, ACPI_PCIQXL_SIZEOF);
            patch_pciqxl(i, pcihp);
        } else if (vga) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCIVGA_SIZEOF);
            memcpy(pcihp, ACPI_PCIVGA_AML, ACPI_PCIVGA_SIZEOF);
            patch_pcivga(i, pcihp);
        } else if (system) {
            /* Nothing to do: system devices are in DSDT or in SSDT above. */
        } else if (present) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCINOHP_SIZEOF);
            memcpy(pcihp, ACPI_PCINOHP_AML, ACPI_PCINOHP_SIZEOF);
            patch_pcinohp(i, pcihp);
        }
    }

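    /* Build the per-bus DVNT method.  The generated AML corresponds roughly
     * to:
     *     Method (DVNT, 2) {
     *         If (And (Arg0, 1 << slot)) { Notify (Sxx, Arg1) }
     *         ...                        // one If per hot-pluggable slot
     *     }
     * where Arg0 is the PCIU/PCID slot bitmap read by PCNT and Arg1 is the
     * notification code to deliver.
     */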
    if (bsel) {
        method = build_alloc_method("DVNT", 2);

        for (i = 0; i < PCI_SLOT_MAX; i++) {
            GArray *notify;
            uint8_t op;

            if (!test_bit(i, slot_hotplug_enable)) {
                continue;
            }

            notify = build_alloc_array();
            op = 0xA0; /* IfOp */

            build_append_byte(notify, 0x7B); /* AndOp */
            build_append_byte(notify, 0x68); /* Arg0Op */
            build_append_int(notify, 0x1U << i);
            build_append_byte(notify, 0x00); /* NullName */
            build_append_byte(notify, 0x86); /* NotifyOp */
            build_append_namestring(notify, "S%.02X", PCI_DEVFN(i, 0));
            build_append_byte(notify, 0x69); /* Arg1Op */

            /* Pack it up */
            build_package(notify, op);

            build_append_array(method, notify);

            build_free_array(notify);
        }

        build_append_and_cleanup_method(bus_table, method);
    }

    /* Append PCNT method to notify about events on local and child buses.
     * Add unconditionally for root since DSDT expects it.
     */
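    /* For a bus with BSEL this produces roughly:
     *     Method (PCNT, 0) {
     *         Store (<bsel>, BNUM)
     *         DVNT (PCIU, 1)    // Device Check for slots that appeared
     *         DVNT (PCID, 3)    // Eject Request for slots going away
     *         ... notify entries collected from child buses ...
     *     }
     */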
    if (bus_hotplug_support || child->notify_table->len || !bus->parent_dev) {
        method = build_alloc_method("PCNT", 0);

        /* If bus supports hotplug select it and notify about local events */
        if (bsel) {
            build_append_byte(method, 0x70); /* StoreOp */
            build_append_int(method, qint_get_int(qobject_to_qint(bsel)));
            build_append_namestring(method, "BNUM");
            build_append_namestring(method, "DVNT");
            build_append_namestring(method, "PCIU");
            build_append_int(method, 1); /* Device Check */
            build_append_namestring(method, "DVNT");
            build_append_namestring(method, "PCID");
            build_append_int(method, 3); /* Eject Request */
        }

        /* Notify about child bus events in any case */
        build_append_array(method, child->notify_table);

        build_append_and_cleanup_method(bus_table, method);
    }

    /* Append description of child buses */
    build_append_array(bus_table, child->device_table);

    /* Pack it up */
    if (bus->parent_dev) {
        build_extop_package(bus_table, op);
    } else {
        build_package(bus_table, op);
    }

    /* Append our bus description to parent table */
    build_append_array(parent->device_table, bus_table);

    /* Also tell parent how to notify us, invoking PCNT method.
     * At the moment this is not needed for root as we have a single root.
     */
    if (bus->parent_dev) {
        build_append_namestring(parent->notify_table, "^PCNT.S%.02X",
                                bus->parent_dev->devfn);
    }

    qobject_decref(bsel);
    build_free_array(bus_table);
    build_pci_bus_state_cleanup(child);
    g_free(child);
}

static void patch_pci_windows(PcPciInfo *pci, uint8_t *start, unsigned size)
{
    ACPI_BUILD_SET_LE(start, size, acpi_pci32_start[0], 32, pci->w32.begin);

    ACPI_BUILD_SET_LE(start, size, acpi_pci32_end[0], 32, pci->w32.end - 1);

    if (pci->w64.end || pci->w64.begin) {
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_valid[0], 8, 1);
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_start[0], 64, pci->w64.begin);
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_end[0], 64, pci->w64.end - 1);
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_length[0], 64,
                          pci->w64.end - pci->w64.begin);
    } else {
        ACPI_BUILD_SET_LE(start, size, acpi_pci64_valid[0], 8, 0);
    }
}

static void
build_ssdt(GArray *table_data, GArray *linker,
           AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc,
           PcPciInfo *pci, PcGuestInfo *guest_info)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    uint32_t nr_mem = machine->ram_slots;
    unsigned acpi_cpus = guest_info->apic_id_limit;
    uint8_t *ssdt_ptr;
    Aml *ssdt, *sb_scope, *scope, *pkg, *dev, *method, *crs, *field;
    int i;

    ssdt = init_aml_allocator();
    /* The current AML generator can cover the APIC ID range [0..255],
     * inclusive, for VCPU hotplug. */
    QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
    g_assert(acpi_cpus <= ACPI_CPU_HOTPLUG_ID_LIMIT);

    /* Copy header and patch values in the S3_ / S4_ / S5_ packages */
    ssdt_ptr = acpi_data_push(ssdt->buf, sizeof(ssdp_misc_aml));
    memcpy(ssdt_ptr, ssdp_misc_aml, sizeof(ssdp_misc_aml));

    patch_pci_windows(pci, ssdt_ptr, sizeof(ssdp_misc_aml));

    ACPI_BUILD_SET_LE(ssdt_ptr, sizeof(ssdp_misc_aml),
                      ssdt_mctrl_nr_slots[0], 32, nr_mem);

    /* create S3_ / S4_ / S5_ packages if necessary */
    scope = aml_scope("\\");
    if (!pm->s3_disabled) {
        pkg = aml_package(4);
        aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */
        aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(scope, aml_name_decl("_S3", pkg));
    }

    if (!pm->s4_disabled) {
        pkg = aml_package(4);
        aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */
        /* PM1b_CNT.SLP_TYP, FIXME: not impl. */
        aml_append(pkg, aml_int(pm->s4_val));
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(scope, aml_name_decl("_S4", pkg));
    }

    pkg = aml_package(4);
    aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */
    aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */
    aml_append(pkg, aml_int(0)); /* reserved */
    aml_append(pkg, aml_int(0)); /* reserved */
    aml_append(scope, aml_name_decl("_S5", pkg));
    aml_append(ssdt, scope);

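    /* Dynamically generated pvpanic device, roughly the following ASL
     * (illustrative decompilation; field lock/update rules follow the
     * aml_field() defaults):
     *     Scope (\_SB.PCI0.ISA) {
     *         Device (PEVR) {
     *             Name (_HID, "QEMU0002")
     *             Name (_CRS, ResourceTemplate () {
     *                 IO (Decode16, <port>, <port>, 0x01, 0x01)
     *             })
     *             OperationRegion (PEOR, SystemIO, <port>, 0x01)
     *             Field (PEOR, ByteAcc, ...) { PEPT, 8 }
     *             Method (RDPT, 0) { Store (PEPT, Local0) Return (Local0) }
     *             Method (WRPT, 1) { Store (Arg0, PEPT) }
     *         }
     *     }
     */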
    if (misc->pvpanic_port) {
        scope = aml_scope("\\_SB.PCI0.ISA");

        dev = aml_device("PEVR");
        aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));

        crs = aml_resource_template();
        aml_append(crs,
            aml_io(aml_decode16, misc->pvpanic_port, misc->pvpanic_port, 1, 1)
        );
        aml_append(dev, aml_name_decl("_CRS", crs));

        aml_append(dev, aml_operation_region("PEOR", aml_system_io,
                                             misc->pvpanic_port, 1));
        field = aml_field("PEOR", aml_byte_acc);
        aml_append(field, aml_named_field("PEPT", 8));
        aml_append(dev, field);

        method = aml_method("RDPT", 0);
        aml_append(method, aml_store(aml_name("PEPT"), aml_local(0)));
        aml_append(method, aml_return(aml_local(0)));
        aml_append(dev, method);

        method = aml_method("WRPT", 1);
        aml_append(method, aml_store(aml_arg(0), aml_name("PEPT")));
        aml_append(dev, method);

        aml_append(scope, dev);
        aml_append(ssdt, scope);
    }

    sb_scope = aml_scope("_SB");
    {
        /* build Processor object for each processor */
        for (i = 0; i < acpi_cpus; i++) {
            uint8_t *proc = acpi_data_push(sb_scope->buf, ACPI_PROC_SIZEOF);
            memcpy(proc, ACPI_PROC_AML, ACPI_PROC_SIZEOF);
            proc[ACPI_PROC_OFFSET_CPUHEX] = acpi_get_hex(i >> 4);
            proc[ACPI_PROC_OFFSET_CPUHEX+1] = acpi_get_hex(i);
            proc[ACPI_PROC_OFFSET_CPUID1] = i;
            proc[ACPI_PROC_OFFSET_CPUID2] = i;
        }

        /* build this code:
         *   Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...}
         */
        /* Arg0 = Processor ID = APIC ID */
        build_append_notify_method(sb_scope->buf, "NTFY",
                                   "CP%0.02X", acpi_cpus);

        /* build "Name(CPON, Package() { One, One, ..., Zero, Zero, ... })" */
        build_append_byte(sb_scope->buf, 0x08); /* NameOp */
        build_append_namestring(sb_scope->buf, "CPON");

        {
            GArray *package = build_alloc_array();
            uint8_t op;

            /*
             * Note: The ability to create variable-sized packages was first
             * introduced in ACPI 2.0.  ACPI 1.0 only allowed fixed-size
             * packages with up to 255 elements.
             * Windows guests up to win2k8 fail when VarPackageOp is used.
             */
            if (acpi_cpus <= 255) {
                op = 0x12; /* PackageOp */
                build_append_byte(package, acpi_cpus); /* NumElements */
            } else {
                op = 0x13; /* VarPackageOp */
                build_append_int(package, acpi_cpus); /* VarNumElements */
            }

            for (i = 0; i < acpi_cpus; i++) {
                uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00;
                build_append_byte(package, b);
            }

            build_package(package, op);
            build_append_array(sb_scope->buf, package);
            build_free_array(package);
        }

        if (nr_mem) {
            assert(nr_mem <= ACPI_MAX_RAM_SLOTS);
            /* build memory devices */
            for (i = 0; i < nr_mem; i++) {
                char id[3];
                uint8_t *mem = acpi_data_push(sb_scope->buf, ACPI_MEM_SIZEOF);

                snprintf(id, sizeof(id), "%02X", i);
                memcpy(mem, ACPI_MEM_AML, ACPI_MEM_SIZEOF);
                memcpy(mem + ACPI_MEM_OFFSET_HEX, id, 2);
                memcpy(mem + ACPI_MEM_OFFSET_ID, id, 2);
            }

            /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) {
             *     If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ...
             */
            build_append_notify_method(sb_scope->buf,
                                       stringify(MEMORY_SLOT_NOTIFY_METHOD),
                                       "MP%0.02X", nr_mem);
        }

        {
            AcpiBuildPciBusHotplugState hotplug_state;
            Object *pci_host;
            PCIBus *bus = NULL;
            bool ambiguous;

            pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE,
                                                &ambiguous);
            if (!ambiguous && pci_host) {
                bus = PCI_HOST_BRIDGE(pci_host)->bus;
            }

            build_pci_bus_state_init(&hotplug_state, NULL, pm->pcihp_bridge_en);

            if (bus) {
                /* Scan all PCI buses. Generate tables to support hotplug. */
                pci_for_each_bus_depth_first(bus, build_pci_bus_begin,
                                             build_pci_bus_end, &hotplug_state);
            }

            build_append_array(sb_scope->buf, hotplug_state.device_table);
            build_pci_bus_state_cleanup(&hotplug_state);
        }
        aml_append(ssdt, sb_scope);
    }

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
    build_header(linker, table_data,
                 (void *)(table_data->data + table_data->len - ssdt->buf->len),
                 "SSDT", ssdt->buf->len, 1);
    free_aml_allocator();
}

static void
build_hpet(GArray *table_data, GArray *linker)
{
    Acpi20Hpet *hpet;

    hpet = acpi_data_push(table_data, sizeof(*hpet));
    /* Note timer_block_id value must be kept in sync with value advertised by
     * emulated hpet
     */
    hpet->timer_block_id = cpu_to_le32(0x8086a201);
    hpet->addr.address = cpu_to_le64(HPET_BASE);
    build_header(linker, table_data,
                 (void *)hpet, "HPET", sizeof(*hpet), 1);
}

static void
build_tpm_tcpa(GArray *table_data, GArray *linker, GArray *tcpalog)
{
    Acpi20Tcpa *tcpa = acpi_data_push(table_data, sizeof *tcpa);
    uint64_t log_area_start_address = acpi_data_len(tcpalog);

    tcpa->platform_class = cpu_to_le16(TPM_TCPA_ACPI_CLASS_CLIENT);
    tcpa->log_area_minimum_length = cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE);
    tcpa->log_area_start_address = cpu_to_le64(log_area_start_address);

    bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, 1,
                             false /* high memory */);

    /* log area start address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TPMLOG_FILE,
                                   table_data, &tcpa->log_area_start_address,
                                   sizeof(tcpa->log_area_start_address));

    build_header(linker, table_data,
                 (void *)tcpa, "TCPA", sizeof(*tcpa), 2);

    acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE);
}

static void
build_tpm_ssdt(GArray *table_data, GArray *linker)
{
    void *tpm_ptr;

    tpm_ptr = acpi_data_push(table_data, sizeof(ssdt_tpm_aml));
    memcpy(tpm_ptr, ssdt_tpm_aml, sizeof(ssdt_tpm_aml));
}

typedef enum {
    MEM_AFFINITY_NOFLAGS      = 0,
    MEM_AFFINITY_ENABLED      = (1 << 0),
    MEM_AFFINITY_HOTPLUGGABLE = (1 << 1),
    MEM_AFFINITY_NON_VOLATILE = (1 << 2),
} MemoryAffinityFlags;

static void
acpi_build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
                       uint64_t len, int node, MemoryAffinityFlags flags)
{
    numamem->type = ACPI_SRAT_MEMORY;
    numamem->length = sizeof(*numamem);
    memset(numamem->proximity, 0, 4);
    numamem->proximity[0] = node;
    numamem->flags = cpu_to_le32(flags);
    numamem->base_addr = cpu_to_le64(base);
    numamem->range_length = cpu_to_le64(len);
}

static void
build_srat(GArray *table_data, GArray *linker, PcGuestInfo *guest_info)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorAffinity *core;
    AcpiSratMemoryAffinity *numamem;

    int i;
    uint64_t curnode;
    int srat_start, numa_start, slots;
    uint64_t mem_len, mem_base, next_base;
    PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
    ram_addr_t hotplugabble_address_space_size =
        object_property_get_int(OBJECT(pcms), PC_MACHINE_MEMHP_REGION_SIZE,
                                NULL);

    srat_start = table_data->len;

    srat = acpi_data_push(table_data, sizeof *srat);
    srat->reserved1 = cpu_to_le32(1);
    core = (void *)(srat + 1);

    for (i = 0; i < guest_info->apic_id_limit; ++i) {
        core = acpi_data_push(table_data, sizeof *core);
        core->type = ACPI_SRAT_PROCESSOR;
        core->length = sizeof(*core);
        core->local_apic_id = i;
        curnode = guest_info->node_cpu[i];
        core->proximity_lo = curnode;
        memset(core->proximity_hi, 0, 3);
        core->local_sapic_eid = 0;
        core->flags = cpu_to_le32(1);
    }

    /* the memory map is a bit tricky, it contains at least one hole
     * from 640k-1M and possibly another one from 3.5G-4G.
     */
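    /* For example, with a single 6 GiB node and ram_size_below_4g at 3.5 GiB,
     * the loop below emits [1 MiB, 3.5 GiB) and [4 GiB, 6.5 GiB): the node is
     * split around the PCI hole, and the 640k-1M hole was already handled by
     * the initial [0, 640k) entry.
     */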
    next_base = 0;
    numa_start = table_data->len;

    numamem = acpi_data_push(table_data, sizeof *numamem);
    acpi_build_srat_memory(numamem, 0, 640 * 1024, 0, MEM_AFFINITY_ENABLED);
    next_base = 1024 * 1024;
    for (i = 1; i < guest_info->numa_nodes + 1; ++i) {
        mem_base = next_base;
        mem_len = guest_info->node_mem[i - 1];
        if (i == 1) {
            mem_len -= 1024 * 1024;
        }
        next_base = mem_base + mem_len;

        /* Cut out the ACPI_PCI hole */
        if (mem_base <= guest_info->ram_size_below_4g &&
            next_base > guest_info->ram_size_below_4g) {
            mem_len -= next_base - guest_info->ram_size_below_4g;
            if (mem_len > 0) {
                numamem = acpi_data_push(table_data, sizeof *numamem);
                acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
                                       MEM_AFFINITY_ENABLED);
            }
            mem_base = 1ULL << 32;
            mem_len = next_base - guest_info->ram_size_below_4g;
            next_base += (1ULL << 32) - guest_info->ram_size_below_4g;
        }
        numamem = acpi_data_push(table_data, sizeof *numamem);
        acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
                               MEM_AFFINITY_ENABLED);
    }
    slots = (table_data->len - numa_start) / sizeof *numamem;
    for (; slots < guest_info->numa_nodes + 2; slots++) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        acpi_build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
    }

    /*
     * Entry is required for Windows to enable memory hotplug in OS.
     * Memory devices may override proximity set by this entry,
     * providing _PXM method if necessary.
     */
    if (hotplugabble_address_space_size) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        acpi_build_srat_memory(numamem, pcms->hotplug_memory_base,
                               hotplugabble_address_space_size, 0,
                               MEM_AFFINITY_HOTPLUGGABLE |
                               MEM_AFFINITY_ENABLED);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + srat_start),
                 "SRAT",
                 table_data->len - srat_start, 1);
}

static void
build_mcfg_q35(GArray *table_data, GArray *linker, AcpiMcfgInfo *info)
{
    AcpiTableMcfg *mcfg;
    const char *sig;
    int len = sizeof(*mcfg) + 1 * sizeof(mcfg->allocation[0]);

    mcfg = acpi_data_push(table_data, len);
    mcfg->allocation[0].address = cpu_to_le64(info->mcfg_base);
    /* Only a single allocation so no need to play with segments */
    mcfg->allocation[0].pci_segment = cpu_to_le16(0);
    mcfg->allocation[0].start_bus_number = 0;
    mcfg->allocation[0].end_bus_number = PCIE_MMCFG_BUS(info->mcfg_size - 1);

    /* MCFG is used for ECAM which can be enabled or disabled by guest.
     * To avoid table size changes (which create migration issues),
     * always create the table even if there are no allocations,
     * but set the signature to a reserved value in this case.
     * ACPI spec requires OSPMs to ignore such tables.
     */
    if (info->mcfg_base == PCIE_BASE_ADDR_UNMAPPED) {
        /* Reserved signature: ignored by OSPM */
        sig = "QEMU";
    } else {
        sig = "MCFG";
    }
    build_header(linker, table_data, (void *)mcfg, sig, len, 1);
}

static void
build_dmar_q35(GArray *table_data, GArray *linker)
{
    int dmar_start = table_data->len;

    AcpiTableDmar *dmar;
    AcpiDmarHardwareUnit *drhd;

    dmar = acpi_data_push(table_data, sizeof(*dmar));
    dmar->host_address_width = VTD_HOST_ADDRESS_WIDTH - 1;
    dmar->flags = 0;    /* No intr_remap for now */

    /* DMAR Remapping Hardware Unit Definition structure */
    drhd = acpi_data_push(table_data, sizeof(*drhd));
    drhd->type = cpu_to_le16(ACPI_DMAR_TYPE_HARDWARE_UNIT);
    drhd->length = cpu_to_le16(sizeof(*drhd));   /* No device scope now */
    drhd->flags = ACPI_DMAR_INCLUDE_PCI_ALL;
    drhd->pci_segment = cpu_to_le16(0);
    drhd->address = cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR);

    build_header(linker, table_data, (void *)(table_data->data + dmar_start),
                 "DMAR", table_data->len - dmar_start, 1);
}

static void
build_dsdt(GArray *table_data, GArray *linker, AcpiMiscInfo *misc)
{
    AcpiTableHeader *dsdt;

    assert(misc->dsdt_code && misc->dsdt_size);

    dsdt = acpi_data_push(table_data, misc->dsdt_size);
    memcpy(dsdt, misc->dsdt_code, misc->dsdt_size);

    memset(dsdt, 0, sizeof *dsdt);
    build_header(linker, table_data, dsdt, "DSDT",
                 misc->dsdt_size, 1);
}

/* Build final rsdt table */
static void
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
{
    AcpiRsdtDescriptorRev1 *rsdt;
    size_t rsdt_len;
    int i;

    rsdt_len = sizeof(*rsdt) + sizeof(uint32_t) * table_offsets->len;
    rsdt = acpi_data_push(table_data, rsdt_len);
    memcpy(rsdt->table_offset_entry, table_offsets->data,
           sizeof(uint32_t) * table_offsets->len);
    for (i = 0; i < table_offsets->len; ++i) {
        /* rsdt->table_offset_entry to be filled by Guest linker */
        bios_linker_loader_add_pointer(linker,
                                       ACPI_BUILD_TABLE_FILE,
                                       ACPI_BUILD_TABLE_FILE,
                                       table_data, &rsdt->table_offset_entry[i],
                                       sizeof(uint32_t));
    }
    build_header(linker, table_data,
                 (void *)rsdt, "RSDT", rsdt_len, 1);
}

static GArray *
build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
{
    AcpiRsdpDescriptor *rsdp = acpi_data_push(rsdp_table, sizeof *rsdp);

    bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, 16,
                             true /* fseg memory */);

    memcpy(&rsdp->signature, "RSD PTR ", 8);
    memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, 6);
    rsdp->rsdt_physical_address = cpu_to_le32(rsdt);
    /* Address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_RSDP_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   rsdp_table, &rsdp->rsdt_physical_address,
                                   sizeof rsdp->rsdt_physical_address);
    rsdp->checksum = 0;
    /* Checksum to be filled by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
                                    rsdp, rsdp, sizeof *rsdp, &rsdp->checksum);

    return rsdp_table;
}

typedef
struct AcpiBuildTables {
    GArray *table_data;
    GArray *rsdp;
    GArray *tcpalog;
    GArray *linker;
} AcpiBuildTables;

static inline void acpi_build_tables_init(AcpiBuildTables *tables)
{
    tables->rsdp = g_array_new(false, true /* clear */, 1);
    tables->table_data = g_array_new(false, true /* clear */, 1);
    tables->tcpalog = g_array_new(false, true /* clear */, 1);
    tables->linker = bios_linker_loader_init();
}

static inline void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre)
{
    void *linker_data = bios_linker_loader_cleanup(tables->linker);
    g_free(linker_data);
    g_array_free(tables->rsdp, true);
    g_array_free(tables->table_data, true);
    g_array_free(tables->tcpalog, mfre);
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    ram_addr_t table_ram;
    /* Is table patched? */
    uint8_t patched;
    PcGuestInfo *guest_info;
    void *rsdp;
    ram_addr_t rsdp_ram;
    ram_addr_t linker_ram;
} AcpiBuildState;

static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg)
{
    Object *pci_host;
    QObject *o;
    bool ambiguous;

    pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
    g_assert(!ambiguous);
    g_assert(pci_host);

    o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_BASE, NULL);
    if (!o) {
        return false;
    }
    mcfg->mcfg_base = qint_get_int(qobject_to_qint(o));
    qobject_decref(o);

    o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_SIZE, NULL);
    assert(o);
    mcfg->mcfg_size = qint_get_int(qobject_to_qint(o));
    qobject_decref(o);
    return true;
}

static bool acpi_has_iommu(void)
{
    bool ambiguous;
    Object *intel_iommu;

    intel_iommu = object_resolve_path_type("", TYPE_INTEL_IOMMU_DEVICE,
                                           &ambiguous);
    return intel_iommu && !ambiguous;
}

static
void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables)
{
    GArray *table_offsets;
    unsigned facs, ssdt, dsdt, rsdt;
    AcpiCpuInfo cpu;
    AcpiPmInfo pm;
    AcpiMiscInfo misc;
    AcpiMcfgInfo mcfg;
    PcPciInfo pci;
    uint8_t *u;
    size_t aml_len = 0;
    GArray *tables_blob = tables->table_data;

    acpi_get_cpu_info(&cpu);
    acpi_get_pm_info(&pm);
    acpi_get_dsdt(&misc);
    acpi_get_misc_info(&misc);
    acpi_get_pci_info(&pci);

    table_offsets = g_array_new(false, true /* clear */,
                                sizeof(uint32_t));
    ACPI_BUILD_DPRINTF("init ACPI tables\n");

    bios_linker_loader_alloc(tables->linker, ACPI_BUILD_TABLE_FILE,
                             64 /* Ensure FACS is aligned */,
                             false /* high memory */);

    /*
     * FACS is pointed to by FADT.
     * We place it first since it's the only table that has alignment
     * requirements.
     */
    facs = tables_blob->len;
    build_facs(tables_blob, tables->linker, guest_info);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, &misc);

    /* Count the size of the DSDT and SSDT, we will need it for legacy
     * sizing of ACPI tables.
     */
    aml_len += tables_blob->len - dsdt;

    /* ACPI tables pointed to by RSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt(tables_blob, tables->linker, &pm, facs, dsdt);

    ssdt = tables_blob->len;
    acpi_add_table(table_offsets, tables_blob);
    build_ssdt(tables_blob, tables->linker, &cpu, &pm, &misc, &pci,
               guest_info);
    aml_len += tables_blob->len - ssdt;

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, &cpu, guest_info);

    if (misc.has_hpet) {
        acpi_add_table(table_offsets, tables_blob);
        build_hpet(tables_blob, tables->linker);
    }
    if (misc.has_tpm) {
        acpi_add_table(table_offsets, tables_blob);
        build_tpm_tcpa(tables_blob, tables->linker, tables->tcpalog);

        acpi_add_table(table_offsets, tables_blob);
        build_tpm_ssdt(tables_blob, tables->linker);
    }
    if (guest_info->numa_nodes) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, guest_info);
    }
    if (acpi_get_mcfg(&mcfg)) {
        acpi_add_table(table_offsets, tables_blob);
        build_mcfg_q35(tables_blob, tables->linker, &mcfg);
    }
    if (acpi_has_iommu()) {
        acpi_add_table(table_offsets, tables_blob);
        build_dmar_q35(tables_blob, tables->linker);
    }

    /* Add tables supplied by user (if any) */
    for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
        unsigned len = acpi_table_len(u);

        acpi_add_table(table_offsets, tables_blob);
        g_array_append_vals(tables_blob, u, len);
    }

    /* RSDT is pointed to by RSDP */
    rsdt = tables_blob->len;
    build_rsdt(tables_blob, tables->linker, table_offsets);

    /* We'll expose it all to Guest so we want to reduce
     * chance of size changes.
     *
     * We used to align the tables to 4k, but of course this would be
     * too simple to be enough.  4k turned out to be too small an
     * alignment very soon, and in fact it is almost impossible to
     * keep the table size stable for all (max_cpus, max_memory_slots)
     * combinations.  So the table size is always 64k for pc-i440fx-2.1
     * and we give an error if the table grows beyond that limit.
     *
     * We still have the problem of migrating from "-M pc-i440fx-2.0".  For
     * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables
     * than 2.0 and we can always pad the smaller tables with zeros.  We can
     * then use the exact size of the 2.0 tables.
     *
     * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration.
     */
    if (guest_info->legacy_acpi_table_size) {
        /* Subtracting aml_len gives the size of fixed tables.  Then add the
         * size of the PIIX4 DSDT/SSDT in QEMU 2.0.
         */
        int legacy_aml_len =
            guest_info->legacy_acpi_table_size +
            ACPI_BUILD_LEGACY_CPU_AML_SIZE * max_cpus;
        int legacy_table_size =
            ROUND_UP(tables_blob->len - aml_len + legacy_aml_len,
                     ACPI_BUILD_ALIGN_SIZE);
        if (tables_blob->len > legacy_table_size) {
            /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */
            error_report("Warning: migration may not work.");
        }
        g_array_set_size(tables_blob, legacy_table_size);
    } else {
        /* Make sure we have a buffer in case we need to resize the tables. */
        if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) {
            /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */
            error_report("Warning: ACPI tables are larger than 64k.");
            error_report("Warning: migration may not work.");
            error_report("Warning: please remove CPUs, NUMA nodes, "
                         "memory slots or PCI bridges.");
        }
        acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE);
    }

    acpi_align_size(tables->linker, ACPI_BUILD_ALIGN_SIZE);

    /* RSDP is in FSEG memory, so allocate it separately */
    build_rsdp(tables->rsdp, tables->linker, rsdt);

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}

static void acpi_ram_update(ram_addr_t ram, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed
     * e.g. by migration */
    qemu_ram_resize(ram, size, &error_abort);

    memcpy(qemu_get_ram_ptr(ram), data->data, size);
    cpu_physical_memory_set_dirty_range_nocode(ram, size);
}

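/* ROM/fw_cfg update callback: regenerate and re-patch the tables (at most
 * once per reset, see build_state->patched) when the guest accesses them,
 * so the copies the guest reads reflect device state at that point rather
 * than at startup.
 */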
static void acpi_build_update(void *build_opaque, uint32_t offset)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = 1;

    acpi_build_tables_init(&tables);

    acpi_build(build_state->guest_info, &tables);

    acpi_ram_update(build_state->table_ram, tables.table_data);

    if (build_state->rsdp) {
        memcpy(build_state->rsdp, tables.rsdp->data, acpi_data_len(tables.rsdp));
    } else {
        acpi_ram_update(build_state->rsdp_ram, tables.rsdp);
    }

    acpi_ram_update(build_state->linker_ram, tables.linker);
    acpi_build_tables_cleanup(&tables, true);
}

static void acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = 0;
}

static ram_addr_t acpi_add_rom_blob(AcpiBuildState *build_state, GArray *blob,
                                    const char *name, uint64_t max_size)
{
    return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
                        name, acpi_build_update, build_state);
}

static const VMStateDescription vmstate_acpi_build = {
    .name = "acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

void acpi_setup(PcGuestInfo *guest_info)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!guest_info->fw_cfg) {
        ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n");
        return;
    }

    if (!guest_info->has_acpi_build) {
        ACPI_BUILD_DPRINTF("ACPI build disabled. Bailing out.\n");
        return;
    }

    if (!acpi_enabled) {
        ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n");
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    build_state->guest_info = guest_info;

    acpi_set_pci_info();

    acpi_build_tables_init(&tables);
    acpi_build(build_state->guest_info, &tables);

    /* Now expose it all to Guest */
    build_state->table_ram = acpi_add_rom_blob(build_state, tables.table_data,
                                               ACPI_BUILD_TABLE_FILE,
                                               ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_ram != RAM_ADDR_MAX);

    build_state->linker_ram =
        acpi_add_rom_blob(build_state, tables.linker, "etc/table-loader", 0);

    fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
                    tables.tcpalog->data, acpi_data_len(tables.tcpalog));

    if (!guest_info->rsdp_in_ram) {
        /*
         * Keep for compatibility with old machine types.
         * Though RSDP is small, its contents aren't immutable, so
         * we'll update it along with the rest of tables on guest access.
         */
        uint32_t rsdp_size = acpi_data_len(tables.rsdp);

        build_state->rsdp = g_memdup(tables.rsdp->data, rsdp_size);
        fw_cfg_add_file_callback(guest_info->fw_cfg, ACPI_BUILD_RSDP_FILE,
                                 acpi_build_update, build_state,
                                 build_state->rsdp, rsdp_size);
        build_state->rsdp_ram = (ram_addr_t)-1;
    } else {
        build_state->rsdp = NULL;
        build_state->rsdp_ram = acpi_add_rom_blob(build_state, tables.rsdp,
                                                  ACPI_BUILD_RSDP_FILE, 0);
    }

    qemu_register_reset(acpi_build_reset, build_state);
    acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}