1 /* Support for generating ACPI tables and passing them to Guests
3 * Copyright (C) 2008-2010 Kevin O'Connor <kevin@koconnor.net>
4 * Copyright (C) 2006 Fabrice Bellard
5 * Copyright (C) 2013 Red Hat Inc
7 * Author: Michael S. Tsirkin <mst@redhat.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, see <http://www.gnu.org/licenses/>.
23 #include "acpi-build.h"
26 #include "qemu-common.h"
27 #include "qemu/bitmap.h"
28 #include "qemu/osdep.h"
29 #include "qemu/range.h"
30 #include "qemu/error-report.h"
31 #include "hw/pci/pci.h"
33 #include "hw/i386/pc.h"
34 #include "target-i386/cpu.h"
35 #include "hw/timer/hpet.h"
36 #include "hw/i386/acpi-defs.h"
37 #include "hw/acpi/acpi.h"
38 #include "hw/nvram/fw_cfg.h"
39 #include "hw/acpi/bios-linker-loader.h"
40 #include "hw/loader.h"
41 #include "hw/isa/isa.h"
42 #include "hw/acpi/memory_hotplug.h"
43 #include "sysemu/tpm.h"
44 #include "hw/acpi/tpm.h"
46 /* Supported chipsets: */
47 #include "hw/acpi/piix4.h"
48 #include "hw/acpi/pcihp.h"
49 #include "hw/i386/ich9.h"
50 #include "hw/pci/pci_bus.h"
51 #include "hw/pci-host/q35.h"
52 #include "hw/i386/intel_iommu.h"
54 #include "hw/i386/q35-acpi-dsdt.hex"
55 #include "hw/i386/acpi-dsdt.hex"
57 #include "hw/acpi/aml-build.h"
59 #include "qapi/qmp/qint.h"
60 #include "qom/qom-qobject.h"
61 #include "exec/ram_addr.h"
63 /* These are used to size the ACPI tables for -M pc-i440fx-1.7 and
64 * -M pc-i440fx-2.0. Even if the actual amount of AML generated grows
65 * a little bit, there should be plenty of free space since the DSDT
66 * shrunk by ~1.5k between QEMU 2.0 and QEMU 2.1.
68 #define ACPI_BUILD_LEGACY_CPU_AML_SIZE 97
69 #define ACPI_BUILD_ALIGN_SIZE 0x1000
71 #define ACPI_BUILD_TABLE_SIZE 0x20000
73 /* Reserve RAM space for tables: add another order of magnitude. */
74 #define ACPI_BUILD_TABLE_MAX_SIZE 0x200000
76 /* #define DEBUG_ACPI_BUILD */
77 #ifdef DEBUG_ACPI_BUILD
78 #define ACPI_BUILD_DPRINTF(fmt, ...) \
79 do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0)
81 #define ACPI_BUILD_DPRINTF(fmt, ...)
84 typedef struct AcpiCpuInfo
{
85 DECLARE_BITMAP(found_cpus
, ACPI_CPU_HOTPLUG_ID_LIMIT
);
88 typedef struct AcpiMcfgInfo
{
93 typedef struct AcpiPmInfo
{
99 uint8_t acpi_enable_cmd
;
100 uint8_t acpi_disable_cmd
;
102 uint32_t gpe0_blk_len
;
106 typedef struct AcpiMiscInfo
{
109 DECLARE_BITMAP(slot_hotplug_enable
, PCI_SLOT_MAX
);
110 const unsigned char *dsdt_code
;
112 uint16_t pvpanic_port
;
115 typedef struct AcpiBuildPciBusHotplugState
{
116 GArray
*device_table
;
117 GArray
*notify_table
;
118 struct AcpiBuildPciBusHotplugState
*parent
;
119 bool pcihp_bridge_en
;
120 } AcpiBuildPciBusHotplugState
;
122 static void acpi_get_dsdt(AcpiMiscInfo
*info
)
124 uint16_t *applesmc_sta
;
125 Object
*piix
= piix4_pm_find();
126 Object
*lpc
= ich9_lpc_find();
127 assert(!!piix
!= !!lpc
);
130 info
->dsdt_code
= AcpiDsdtAmlCode
;
131 info
->dsdt_size
= sizeof AcpiDsdtAmlCode
;
132 applesmc_sta
= piix_dsdt_applesmc_sta
;
135 info
->dsdt_code
= Q35AcpiDsdtAmlCode
;
136 info
->dsdt_size
= sizeof Q35AcpiDsdtAmlCode
;
137 applesmc_sta
= q35_dsdt_applesmc_sta
;
140 /* Patch in appropriate value for AppleSMC _STA */
141 *(uint8_t *)(info
->dsdt_code
+ *applesmc_sta
) =
142 applesmc_find() ? 0x0b : 0x00;
146 int acpi_add_cpu_info(Object
*o
, void *opaque
)
148 AcpiCpuInfo
*cpu
= opaque
;
151 if (object_dynamic_cast(o
, TYPE_CPU
)) {
152 apic_id
= object_property_get_int(o
, "apic-id", NULL
);
153 assert(apic_id
< ACPI_CPU_HOTPLUG_ID_LIMIT
);
155 set_bit(apic_id
, cpu
->found_cpus
);
158 object_child_foreach(o
, acpi_add_cpu_info
, opaque
);
162 static void acpi_get_cpu_info(AcpiCpuInfo
*cpu
)
164 Object
*root
= object_get_root();
166 memset(cpu
->found_cpus
, 0, sizeof cpu
->found_cpus
);
167 object_child_foreach(root
, acpi_add_cpu_info
, cpu
);
170 static void acpi_get_pm_info(AcpiPmInfo
*pm
)
172 Object
*piix
= piix4_pm_find();
173 Object
*lpc
= ich9_lpc_find();
185 /* Fill in optional s3/s4 related properties */
186 o
= object_property_get_qobject(obj
, ACPI_PM_PROP_S3_DISABLED
, NULL
);
188 pm
->s3_disabled
= qint_get_int(qobject_to_qint(o
));
190 pm
->s3_disabled
= false;
193 o
= object_property_get_qobject(obj
, ACPI_PM_PROP_S4_DISABLED
, NULL
);
195 pm
->s4_disabled
= qint_get_int(qobject_to_qint(o
));
197 pm
->s4_disabled
= false;
200 o
= object_property_get_qobject(obj
, ACPI_PM_PROP_S4_VAL
, NULL
);
202 pm
->s4_val
= qint_get_int(qobject_to_qint(o
));
208 /* Fill in mandatory properties */
209 pm
->sci_int
= object_property_get_int(obj
, ACPI_PM_PROP_SCI_INT
, NULL
);
211 pm
->acpi_enable_cmd
= object_property_get_int(obj
,
212 ACPI_PM_PROP_ACPI_ENABLE_CMD
,
214 pm
->acpi_disable_cmd
= object_property_get_int(obj
,
215 ACPI_PM_PROP_ACPI_DISABLE_CMD
,
217 pm
->io_base
= object_property_get_int(obj
, ACPI_PM_PROP_PM_IO_BASE
,
219 pm
->gpe0_blk
= object_property_get_int(obj
, ACPI_PM_PROP_GPE0_BLK
,
221 pm
->gpe0_blk_len
= object_property_get_int(obj
, ACPI_PM_PROP_GPE0_BLK_LEN
,
223 pm
->pcihp_bridge_en
=
224 object_property_get_bool(obj
, "acpi-pci-hotplug-with-bridge-support",
228 static void acpi_get_misc_info(AcpiMiscInfo
*info
)
230 info
->has_hpet
= hpet_find();
231 info
->has_tpm
= tpm_find();
232 info
->pvpanic_port
= pvpanic_port();
235 static void acpi_get_pci_info(PcPciInfo
*info
)
240 pci_host
= object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE
, &ambiguous
);
241 g_assert(!ambiguous
);
244 info
->w32
.begin
= object_property_get_int(pci_host
,
245 PCI_HOST_PROP_PCI_HOLE_START
,
247 info
->w32
.end
= object_property_get_int(pci_host
,
248 PCI_HOST_PROP_PCI_HOLE_END
,
250 info
->w64
.begin
= object_property_get_int(pci_host
,
251 PCI_HOST_PROP_PCI_HOLE64_START
,
253 info
->w64
.end
= object_property_get_int(pci_host
,
254 PCI_HOST_PROP_PCI_HOLE64_END
,
258 #define ACPI_BUILD_APPNAME "Bochs"
259 #define ACPI_BUILD_APPNAME6 "BOCHS "
260 #define ACPI_BUILD_APPNAME4 "BXPC"
262 #define ACPI_BUILD_TABLE_FILE "etc/acpi/tables"
263 #define ACPI_BUILD_RSDP_FILE "etc/acpi/rsdp"
264 #define ACPI_BUILD_TPMLOG_FILE "etc/tpm/log"
267 build_header(GArray
*linker
, GArray
*table_data
,
268 AcpiTableHeader
*h
, const char *sig
, int len
, uint8_t rev
)
270 memcpy(&h
->signature
, sig
, 4);
271 h
->length
= cpu_to_le32(len
);
273 memcpy(h
->oem_id
, ACPI_BUILD_APPNAME6
, 6);
274 memcpy(h
->oem_table_id
, ACPI_BUILD_APPNAME4
, 4);
275 memcpy(h
->oem_table_id
+ 4, sig
, 4);
276 h
->oem_revision
= cpu_to_le32(1);
277 memcpy(h
->asl_compiler_id
, ACPI_BUILD_APPNAME4
, 4);
278 h
->asl_compiler_revision
= cpu_to_le32(1);
280 /* Checksum to be filled in by Guest linker */
281 bios_linker_loader_add_checksum(linker
, ACPI_BUILD_TABLE_FILE
,
282 table_data
->data
, h
, len
, &h
->checksum
);
285 static GArray
*build_alloc_method(const char *name
, uint8_t arg_count
)
287 GArray
*method
= build_alloc_array();
289 build_append_namestring(method
, "%s", name
);
290 build_append_byte(method
, arg_count
); /* MethodFlags: ArgCount */
295 static void build_append_and_cleanup_method(GArray
*device
, GArray
*method
)
297 uint8_t op
= 0x14; /* MethodOp */
299 build_package(method
, op
);
301 build_append_array(device
, method
);
302 build_free_array(method
);
305 static void build_append_notify_target_ifequal(GArray
*method
,
307 uint32_t value
, int size
)
309 GArray
*notify
= build_alloc_array();
310 uint8_t op
= 0xA0; /* IfOp */
312 build_append_byte(notify
, 0x93); /* LEqualOp */
313 build_append_byte(notify
, 0x68); /* Arg0Op */
314 build_append_value(notify
, value
, size
);
315 build_append_byte(notify
, 0x86); /* NotifyOp */
316 build_append_array(notify
, target_name
);
317 build_append_byte(notify
, 0x69); /* Arg1Op */
320 build_package(notify
, op
);
322 build_append_array(method
, notify
);
324 build_free_array(notify
);
328 #define ACPI_PORT_SMI_CMD 0x00b2 /* TODO: this is APM_CNT_IOPORT */
330 static inline void *acpi_data_push(GArray
*table_data
, unsigned size
)
332 unsigned off
= table_data
->len
;
333 g_array_set_size(table_data
, off
+ size
);
334 return table_data
->data
+ off
;
337 static unsigned acpi_data_len(GArray
*table
)
339 #if GLIB_CHECK_VERSION(2, 22, 0)
340 assert(g_array_get_element_size(table
) == 1);
345 static void acpi_align_size(GArray
*blob
, unsigned align
)
347 /* Align size to multiple of given size. This reduces the chance
348 * we need to change size in the future (breaking cross version migration).
350 g_array_set_size(blob
, ROUND_UP(acpi_data_len(blob
), align
));
353 /* Set a value within table in a safe manner */
354 #define ACPI_BUILD_SET_LE(table, size, off, bits, val) \
356 uint64_t ACPI_BUILD_SET_LE_val = cpu_to_le64(val); \
357 memcpy(acpi_data_get_ptr(table, size, off, \
358 (bits) / BITS_PER_BYTE), \
359 &ACPI_BUILD_SET_LE_val, \
360 (bits) / BITS_PER_BYTE); \
/* Bounds-checked access into a table blob: return a pointer to the
 * @size-byte region at @off inside table_data[0..table_size).
 * The first assert also catches unsigned wrap-around of off + size.
 */
static inline void *acpi_data_get_ptr(uint8_t *table_data, unsigned table_size,
                                      unsigned off, unsigned size)
{
    assert(off + size > off);
    assert(off + size <= table_size);
    return table_data + off;
}
371 static inline void acpi_add_table(GArray
*table_offsets
, GArray
*table_data
)
373 uint32_t offset
= cpu_to_le32(table_data
->len
);
374 g_array_append_val(table_offsets
, offset
);
379 build_facs(GArray
*table_data
, GArray
*linker
, PcGuestInfo
*guest_info
)
381 AcpiFacsDescriptorRev1
*facs
= acpi_data_push(table_data
, sizeof *facs
);
382 memcpy(&facs
->signature
, "FACS", 4);
383 facs
->length
= cpu_to_le32(sizeof(*facs
));
386 /* Load chipset information in FADT */
387 static void fadt_setup(AcpiFadtDescriptorRev1
*fadt
, AcpiPmInfo
*pm
)
391 fadt
->sci_int
= cpu_to_le16(pm
->sci_int
);
392 fadt
->smi_cmd
= cpu_to_le32(ACPI_PORT_SMI_CMD
);
393 fadt
->acpi_enable
= pm
->acpi_enable_cmd
;
394 fadt
->acpi_disable
= pm
->acpi_disable_cmd
;
395 /* EVT, CNT, TMR offset matches hw/acpi/core.c */
396 fadt
->pm1a_evt_blk
= cpu_to_le32(pm
->io_base
);
397 fadt
->pm1a_cnt_blk
= cpu_to_le32(pm
->io_base
+ 0x04);
398 fadt
->pm_tmr_blk
= cpu_to_le32(pm
->io_base
+ 0x08);
399 fadt
->gpe0_blk
= cpu_to_le32(pm
->gpe0_blk
);
400 /* EVT, CNT, TMR length matches hw/acpi/core.c */
401 fadt
->pm1_evt_len
= 4;
402 fadt
->pm1_cnt_len
= 2;
403 fadt
->pm_tmr_len
= 4;
404 fadt
->gpe0_blk_len
= pm
->gpe0_blk_len
;
405 fadt
->plvl2_lat
= cpu_to_le16(0xfff); /* C2 state not supported */
406 fadt
->plvl3_lat
= cpu_to_le16(0xfff); /* C3 state not supported */
407 fadt
->flags
= cpu_to_le32((1 << ACPI_FADT_F_WBINVD
) |
408 (1 << ACPI_FADT_F_PROC_C1
) |
409 (1 << ACPI_FADT_F_SLP_BUTTON
) |
410 (1 << ACPI_FADT_F_RTC_S4
));
411 fadt
->flags
|= cpu_to_le32(1 << ACPI_FADT_F_USE_PLATFORM_CLOCK
);
412 /* APIC destination mode ("Flat Logical") has an upper limit of 8 CPUs
413 * For more than 8 CPUs, "Clustered Logical" mode has to be used
416 fadt
->flags
|= cpu_to_le32(1 << ACPI_FADT_F_FORCE_APIC_CLUSTER_MODEL
);
423 build_fadt(GArray
*table_data
, GArray
*linker
, AcpiPmInfo
*pm
,
424 unsigned facs
, unsigned dsdt
)
426 AcpiFadtDescriptorRev1
*fadt
= acpi_data_push(table_data
, sizeof(*fadt
));
428 fadt
->firmware_ctrl
= cpu_to_le32(facs
);
429 /* FACS address to be filled by Guest linker */
430 bios_linker_loader_add_pointer(linker
, ACPI_BUILD_TABLE_FILE
,
431 ACPI_BUILD_TABLE_FILE
,
432 table_data
, &fadt
->firmware_ctrl
,
433 sizeof fadt
->firmware_ctrl
);
435 fadt
->dsdt
= cpu_to_le32(dsdt
);
436 /* DSDT address to be filled by Guest linker */
437 bios_linker_loader_add_pointer(linker
, ACPI_BUILD_TABLE_FILE
,
438 ACPI_BUILD_TABLE_FILE
,
439 table_data
, &fadt
->dsdt
,
442 fadt_setup(fadt
, pm
);
444 build_header(linker
, table_data
,
445 (void *)fadt
, "FACP", sizeof(*fadt
), 1);
449 build_madt(GArray
*table_data
, GArray
*linker
, AcpiCpuInfo
*cpu
,
450 PcGuestInfo
*guest_info
)
452 int madt_start
= table_data
->len
;
454 AcpiMultipleApicTable
*madt
;
455 AcpiMadtIoApic
*io_apic
;
456 AcpiMadtIntsrcovr
*intsrcovr
;
457 AcpiMadtLocalNmi
*local_nmi
;
460 madt
= acpi_data_push(table_data
, sizeof *madt
);
461 madt
->local_apic_address
= cpu_to_le32(APIC_DEFAULT_ADDRESS
);
462 madt
->flags
= cpu_to_le32(1);
464 for (i
= 0; i
< guest_info
->apic_id_limit
; i
++) {
465 AcpiMadtProcessorApic
*apic
= acpi_data_push(table_data
, sizeof *apic
);
466 apic
->type
= ACPI_APIC_PROCESSOR
;
467 apic
->length
= sizeof(*apic
);
468 apic
->processor_id
= i
;
469 apic
->local_apic_id
= i
;
470 if (test_bit(i
, cpu
->found_cpus
)) {
471 apic
->flags
= cpu_to_le32(1);
473 apic
->flags
= cpu_to_le32(0);
476 io_apic
= acpi_data_push(table_data
, sizeof *io_apic
);
477 io_apic
->type
= ACPI_APIC_IO
;
478 io_apic
->length
= sizeof(*io_apic
);
479 #define ACPI_BUILD_IOAPIC_ID 0x0
480 io_apic
->io_apic_id
= ACPI_BUILD_IOAPIC_ID
;
481 io_apic
->address
= cpu_to_le32(IO_APIC_DEFAULT_ADDRESS
);
482 io_apic
->interrupt
= cpu_to_le32(0);
484 if (guest_info
->apic_xrupt_override
) {
485 intsrcovr
= acpi_data_push(table_data
, sizeof *intsrcovr
);
486 intsrcovr
->type
= ACPI_APIC_XRUPT_OVERRIDE
;
487 intsrcovr
->length
= sizeof(*intsrcovr
);
488 intsrcovr
->source
= 0;
489 intsrcovr
->gsi
= cpu_to_le32(2);
490 intsrcovr
->flags
= cpu_to_le16(0); /* conforms to bus specifications */
492 for (i
= 1; i
< 16; i
++) {
493 #define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11))
494 if (!(ACPI_BUILD_PCI_IRQS
& (1 << i
))) {
495 /* No need for a INT source override structure. */
498 intsrcovr
= acpi_data_push(table_data
, sizeof *intsrcovr
);
499 intsrcovr
->type
= ACPI_APIC_XRUPT_OVERRIDE
;
500 intsrcovr
->length
= sizeof(*intsrcovr
);
501 intsrcovr
->source
= i
;
502 intsrcovr
->gsi
= cpu_to_le32(i
);
503 intsrcovr
->flags
= cpu_to_le16(0xd); /* active high, level triggered */
506 local_nmi
= acpi_data_push(table_data
, sizeof *local_nmi
);
507 local_nmi
->type
= ACPI_APIC_LOCAL_NMI
;
508 local_nmi
->length
= sizeof(*local_nmi
);
509 local_nmi
->processor_id
= 0xff; /* all processors */
510 local_nmi
->flags
= cpu_to_le16(0);
511 local_nmi
->lint
= 1; /* ACPI_LINT1 */
513 build_header(linker
, table_data
,
514 (void *)(table_data
->data
+ madt_start
), "APIC",
515 table_data
->len
- madt_start
, 1);
518 /* Encode a hex value */
/* Encode the low nibble of @val as an uppercase hex digit character.
 * Fix: mask to the low 4 bits — callers (patch_pcihp() etc.) pass full
 * devfn values, but only one hex digit is being produced.
 */
static inline char acpi_get_hex(uint32_t val)
{
    val &= 0x0f;
    return (val <= 9) ? ('0' + val) : ('A' + val - 10);
}
525 #include "hw/i386/ssdt-proc.hex"
527 /* 0x5B 0x83 ProcessorOp PkgLength NameString ProcID */
528 #define ACPI_PROC_OFFSET_CPUHEX (*ssdt_proc_name - *ssdt_proc_start + 2)
529 #define ACPI_PROC_OFFSET_CPUID1 (*ssdt_proc_name - *ssdt_proc_start + 4)
530 #define ACPI_PROC_OFFSET_CPUID2 (*ssdt_proc_id - *ssdt_proc_start)
531 #define ACPI_PROC_SIZEOF (*ssdt_proc_end - *ssdt_proc_start)
532 #define ACPI_PROC_AML (ssdp_proc_aml + *ssdt_proc_start)
534 /* 0x5B 0x82 DeviceOp PkgLength NameString */
535 #define ACPI_PCIHP_OFFSET_HEX (*ssdt_pcihp_name - *ssdt_pcihp_start + 1)
536 #define ACPI_PCIHP_OFFSET_ID (*ssdt_pcihp_id - *ssdt_pcihp_start)
537 #define ACPI_PCIHP_OFFSET_ADR (*ssdt_pcihp_adr - *ssdt_pcihp_start)
538 #define ACPI_PCIHP_OFFSET_EJ0 (*ssdt_pcihp_ej0 - *ssdt_pcihp_start)
539 #define ACPI_PCIHP_SIZEOF (*ssdt_pcihp_end - *ssdt_pcihp_start)
540 #define ACPI_PCIHP_AML (ssdp_pcihp_aml + *ssdt_pcihp_start)
542 #define ACPI_PCINOHP_OFFSET_HEX (*ssdt_pcinohp_name - *ssdt_pcinohp_start + 1)
543 #define ACPI_PCINOHP_OFFSET_ADR (*ssdt_pcinohp_adr - *ssdt_pcinohp_start)
544 #define ACPI_PCINOHP_SIZEOF (*ssdt_pcinohp_end - *ssdt_pcinohp_start)
545 #define ACPI_PCINOHP_AML (ssdp_pcihp_aml + *ssdt_pcinohp_start)
547 #define ACPI_PCIVGA_OFFSET_HEX (*ssdt_pcivga_name - *ssdt_pcivga_start + 1)
548 #define ACPI_PCIVGA_OFFSET_ADR (*ssdt_pcivga_adr - *ssdt_pcivga_start)
549 #define ACPI_PCIVGA_SIZEOF (*ssdt_pcivga_end - *ssdt_pcivga_start)
550 #define ACPI_PCIVGA_AML (ssdp_pcihp_aml + *ssdt_pcivga_start)
552 #define ACPI_PCIQXL_OFFSET_HEX (*ssdt_pciqxl_name - *ssdt_pciqxl_start + 1)
553 #define ACPI_PCIQXL_OFFSET_ADR (*ssdt_pciqxl_adr - *ssdt_pciqxl_start)
554 #define ACPI_PCIQXL_SIZEOF (*ssdt_pciqxl_end - *ssdt_pciqxl_start)
555 #define ACPI_PCIQXL_AML (ssdp_pcihp_aml + *ssdt_pciqxl_start)
557 #include "hw/i386/ssdt-mem.hex"
559 /* 0x5B 0x82 DeviceOp PkgLength NameString DimmID */
560 #define ACPI_MEM_OFFSET_HEX (*ssdt_mem_name - *ssdt_mem_start + 2)
561 #define ACPI_MEM_OFFSET_ID (*ssdt_mem_id - *ssdt_mem_start + 7)
562 #define ACPI_MEM_SIZEOF (*ssdt_mem_end - *ssdt_mem_start)
563 #define ACPI_MEM_AML (ssdm_mem_aml + *ssdt_mem_start)
565 #define ACPI_SSDT_SIGNATURE 0x54445353 /* SSDT */
566 #define ACPI_SSDT_HEADER_LENGTH 36
568 #include "hw/i386/ssdt-misc.hex"
569 #include "hw/i386/ssdt-pcihp.hex"
570 #include "hw/i386/ssdt-tpm.hex"
573 build_append_notify_method(GArray
*device
, const char *name
,
574 const char *format
, int count
)
577 GArray
*method
= build_alloc_method(name
, 2);
579 for (i
= 0; i
< count
; i
++) {
580 GArray
*target
= build_alloc_array();
581 build_append_namestring(target
, format
, i
);
582 assert(i
< 256); /* Fits in 1 byte */
583 build_append_notify_target_ifequal(method
, target
, i
, 1);
584 build_free_array(target
);
587 build_append_and_cleanup_method(device
, method
);
590 static void patch_pcihp(int slot
, uint8_t *ssdt_ptr
)
592 unsigned devfn
= PCI_DEVFN(slot
, 0);
594 ssdt_ptr
[ACPI_PCIHP_OFFSET_HEX
] = acpi_get_hex(devfn
>> 4);
595 ssdt_ptr
[ACPI_PCIHP_OFFSET_HEX
+ 1] = acpi_get_hex(devfn
);
596 ssdt_ptr
[ACPI_PCIHP_OFFSET_ID
] = slot
;
597 ssdt_ptr
[ACPI_PCIHP_OFFSET_ADR
+ 2] = slot
;
600 static void patch_pcinohp(int slot
, uint8_t *ssdt_ptr
)
602 unsigned devfn
= PCI_DEVFN(slot
, 0);
604 ssdt_ptr
[ACPI_PCINOHP_OFFSET_HEX
] = acpi_get_hex(devfn
>> 4);
605 ssdt_ptr
[ACPI_PCINOHP_OFFSET_HEX
+ 1] = acpi_get_hex(devfn
);
606 ssdt_ptr
[ACPI_PCINOHP_OFFSET_ADR
+ 2] = slot
;
609 static void patch_pcivga(int slot
, uint8_t *ssdt_ptr
)
611 unsigned devfn
= PCI_DEVFN(slot
, 0);
613 ssdt_ptr
[ACPI_PCIVGA_OFFSET_HEX
] = acpi_get_hex(devfn
>> 4);
614 ssdt_ptr
[ACPI_PCIVGA_OFFSET_HEX
+ 1] = acpi_get_hex(devfn
);
615 ssdt_ptr
[ACPI_PCIVGA_OFFSET_ADR
+ 2] = slot
;
618 static void patch_pciqxl(int slot
, uint8_t *ssdt_ptr
)
620 unsigned devfn
= PCI_DEVFN(slot
, 0);
622 ssdt_ptr
[ACPI_PCIQXL_OFFSET_HEX
] = acpi_get_hex(devfn
>> 4);
623 ssdt_ptr
[ACPI_PCIQXL_OFFSET_HEX
+ 1] = acpi_get_hex(devfn
);
624 ssdt_ptr
[ACPI_PCIQXL_OFFSET_ADR
+ 2] = slot
;
627 /* Assign BSEL property to all buses. In the future, this can be changed
628 * to only assign to buses that support hotplug.
630 static void *acpi_set_bsel(PCIBus
*bus
, void *opaque
)
632 unsigned *bsel_alloc
= opaque
;
635 if (qbus_is_hotpluggable(BUS(bus
))) {
636 bus_bsel
= g_malloc(sizeof *bus_bsel
);
638 *bus_bsel
= (*bsel_alloc
)++;
639 object_property_add_uint32_ptr(OBJECT(bus
), ACPI_PCIHP_PROP_BSEL
,
646 static void acpi_set_pci_info(void)
648 PCIBus
*bus
= find_i440fx(); /* TODO: Q35 support */
649 unsigned bsel_alloc
= 0;
652 /* Scan all PCI buses. Set property to enable acpi based hotplug. */
653 pci_for_each_bus_depth_first(bus
, acpi_set_bsel
, NULL
, &bsel_alloc
);
657 static void build_pci_bus_state_init(AcpiBuildPciBusHotplugState
*state
,
658 AcpiBuildPciBusHotplugState
*parent
,
659 bool pcihp_bridge_en
)
661 state
->parent
= parent
;
662 state
->device_table
= build_alloc_array();
663 state
->notify_table
= build_alloc_array();
664 state
->pcihp_bridge_en
= pcihp_bridge_en
;
667 static void build_pci_bus_state_cleanup(AcpiBuildPciBusHotplugState
*state
)
669 build_free_array(state
->device_table
);
670 build_free_array(state
->notify_table
);
673 static void *build_pci_bus_begin(PCIBus
*bus
, void *parent_state
)
675 AcpiBuildPciBusHotplugState
*parent
= parent_state
;
676 AcpiBuildPciBusHotplugState
*child
= g_malloc(sizeof *child
);
678 build_pci_bus_state_init(child
, parent
, parent
->pcihp_bridge_en
);
683 static void build_pci_bus_end(PCIBus
*bus
, void *bus_state
)
685 AcpiBuildPciBusHotplugState
*child
= bus_state
;
686 AcpiBuildPciBusHotplugState
*parent
= child
->parent
;
687 GArray
*bus_table
= build_alloc_array();
688 DECLARE_BITMAP(slot_hotplug_enable
, PCI_SLOT_MAX
);
689 DECLARE_BITMAP(slot_device_present
, PCI_SLOT_MAX
);
690 DECLARE_BITMAP(slot_device_system
, PCI_SLOT_MAX
);
691 DECLARE_BITMAP(slot_device_vga
, PCI_SLOT_MAX
);
692 DECLARE_BITMAP(slot_device_qxl
, PCI_SLOT_MAX
);
697 bool bus_hotplug_support
= false;
700 * Skip bridge subtree creation if bridge hotplug is disabled
701 * to make acpi tables compatible with legacy machine types.
703 if (!child
->pcihp_bridge_en
&& bus
->parent_dev
) {
704 build_free_array(bus_table
);
705 build_pci_bus_state_cleanup(child
);
710 if (bus
->parent_dev
) {
711 op
= 0x82; /* DeviceOp */
712 build_append_namestring(bus_table
, "S%.02X",
713 bus
->parent_dev
->devfn
);
714 build_append_byte(bus_table
, 0x08); /* NameOp */
715 build_append_namestring(bus_table
, "_SUN");
716 build_append_value(bus_table
, PCI_SLOT(bus
->parent_dev
->devfn
), 1);
717 build_append_byte(bus_table
, 0x08); /* NameOp */
718 build_append_namestring(bus_table
, "_ADR");
719 build_append_value(bus_table
, (PCI_SLOT(bus
->parent_dev
->devfn
) << 16) |
720 PCI_FUNC(bus
->parent_dev
->devfn
), 4);
722 op
= 0x10; /* ScopeOp */;
723 build_append_namestring(bus_table
, "PCI0");
726 bsel
= object_property_get_qobject(OBJECT(bus
), ACPI_PCIHP_PROP_BSEL
, NULL
);
728 build_append_byte(bus_table
, 0x08); /* NameOp */
729 build_append_namestring(bus_table
, "BSEL");
730 build_append_int(bus_table
, qint_get_int(qobject_to_qint(bsel
)));
731 memset(slot_hotplug_enable
, 0xff, sizeof slot_hotplug_enable
);
733 /* No bsel - no slots are hot-pluggable */
734 memset(slot_hotplug_enable
, 0x00, sizeof slot_hotplug_enable
);
737 memset(slot_device_present
, 0x00, sizeof slot_device_present
);
738 memset(slot_device_system
, 0x00, sizeof slot_device_present
);
739 memset(slot_device_vga
, 0x00, sizeof slot_device_vga
);
740 memset(slot_device_qxl
, 0x00, sizeof slot_device_qxl
);
742 for (i
= 0; i
< ARRAY_SIZE(bus
->devices
); i
+= PCI_FUNC_MAX
) {
745 PCIDevice
*pdev
= bus
->devices
[i
];
746 int slot
= PCI_SLOT(i
);
753 set_bit(slot
, slot_device_present
);
754 pc
= PCI_DEVICE_GET_CLASS(pdev
);
755 dc
= DEVICE_GET_CLASS(pdev
);
757 /* When hotplug for bridges is enabled, bridges are
758 * described in ACPI separately (see build_pci_bus_end).
759 * In this case they aren't themselves hot-pluggable.
761 bridge_in_acpi
= pc
->is_bridge
&& child
->pcihp_bridge_en
;
763 if (pc
->class_id
== PCI_CLASS_BRIDGE_ISA
|| bridge_in_acpi
) {
764 set_bit(slot
, slot_device_system
);
767 if (pc
->class_id
== PCI_CLASS_DISPLAY_VGA
) {
768 set_bit(slot
, slot_device_vga
);
770 if (object_dynamic_cast(OBJECT(pdev
), "qxl-vga")) {
771 set_bit(slot
, slot_device_qxl
);
775 if (!dc
->hotpluggable
|| bridge_in_acpi
) {
776 clear_bit(slot
, slot_hotplug_enable
);
780 /* Append Device object for each slot */
781 for (i
= 0; i
< PCI_SLOT_MAX
; i
++) {
782 bool can_eject
= test_bit(i
, slot_hotplug_enable
);
783 bool present
= test_bit(i
, slot_device_present
);
784 bool vga
= test_bit(i
, slot_device_vga
);
785 bool qxl
= test_bit(i
, slot_device_qxl
);
786 bool system
= test_bit(i
, slot_device_system
);
788 void *pcihp
= acpi_data_push(bus_table
,
790 memcpy(pcihp
, ACPI_PCIHP_AML
, ACPI_PCIHP_SIZEOF
);
791 patch_pcihp(i
, pcihp
);
792 bus_hotplug_support
= true;
794 void *pcihp
= acpi_data_push(bus_table
,
796 memcpy(pcihp
, ACPI_PCIQXL_AML
, ACPI_PCIQXL_SIZEOF
);
797 patch_pciqxl(i
, pcihp
);
799 void *pcihp
= acpi_data_push(bus_table
,
801 memcpy(pcihp
, ACPI_PCIVGA_AML
, ACPI_PCIVGA_SIZEOF
);
802 patch_pcivga(i
, pcihp
);
804 /* Nothing to do: system devices are in DSDT or in SSDT above. */
805 } else if (present
) {
806 void *pcihp
= acpi_data_push(bus_table
,
807 ACPI_PCINOHP_SIZEOF
);
808 memcpy(pcihp
, ACPI_PCINOHP_AML
, ACPI_PCINOHP_SIZEOF
);
809 patch_pcinohp(i
, pcihp
);
814 method
= build_alloc_method("DVNT", 2);
816 for (i
= 0; i
< PCI_SLOT_MAX
; i
++) {
820 if (!test_bit(i
, slot_hotplug_enable
)) {
824 notify
= build_alloc_array();
825 op
= 0xA0; /* IfOp */
827 build_append_byte(notify
, 0x7B); /* AndOp */
828 build_append_byte(notify
, 0x68); /* Arg0Op */
829 build_append_int(notify
, 0x1U
<< i
);
830 build_append_byte(notify
, 0x00); /* NullName */
831 build_append_byte(notify
, 0x86); /* NotifyOp */
832 build_append_namestring(notify
, "S%.02X", PCI_DEVFN(i
, 0));
833 build_append_byte(notify
, 0x69); /* Arg1Op */
836 build_package(notify
, op
);
838 build_append_array(method
, notify
);
840 build_free_array(notify
);
843 build_append_and_cleanup_method(bus_table
, method
);
846 /* Append PCNT method to notify about events on local and child buses.
847 * Add unconditionally for root since DSDT expects it.
849 if (bus_hotplug_support
|| child
->notify_table
->len
|| !bus
->parent_dev
) {
850 method
= build_alloc_method("PCNT", 0);
852 /* If bus supports hotplug select it and notify about local events */
854 build_append_byte(method
, 0x70); /* StoreOp */
855 build_append_int(method
, qint_get_int(qobject_to_qint(bsel
)));
856 build_append_namestring(method
, "BNUM");
857 build_append_namestring(method
, "DVNT");
858 build_append_namestring(method
, "PCIU");
859 build_append_int(method
, 1); /* Device Check */
860 build_append_namestring(method
, "DVNT");
861 build_append_namestring(method
, "PCID");
862 build_append_int(method
, 3); /* Eject Request */
865 /* Notify about child bus events in any case */
866 build_append_array(method
, child
->notify_table
);
868 build_append_and_cleanup_method(bus_table
, method
);
870 /* Append description of child buses */
871 build_append_array(bus_table
, child
->device_table
);
874 if (bus
->parent_dev
) {
875 build_extop_package(bus_table
, op
);
877 build_package(bus_table
, op
);
880 /* Append our bus description to parent table */
881 build_append_array(parent
->device_table
, bus_table
);
883 /* Also tell parent how to notify us, invoking PCNT method.
884 * At the moment this is not needed for root as we have a single root.
886 if (bus
->parent_dev
) {
887 build_append_namestring(parent
->notify_table
, "^PCNT.S%.02X",
888 bus
->parent_dev
->devfn
);
892 qobject_decref(bsel
);
893 build_free_array(bus_table
);
894 build_pci_bus_state_cleanup(child
);
898 static void patch_pci_windows(PcPciInfo
*pci
, uint8_t *start
, unsigned size
)
900 ACPI_BUILD_SET_LE(start
, size
, acpi_pci32_start
[0], 32, pci
->w32
.begin
);
902 ACPI_BUILD_SET_LE(start
, size
, acpi_pci32_end
[0], 32, pci
->w32
.end
- 1);
904 if (pci
->w64
.end
|| pci
->w64
.begin
) {
905 ACPI_BUILD_SET_LE(start
, size
, acpi_pci64_valid
[0], 8, 1);
906 ACPI_BUILD_SET_LE(start
, size
, acpi_pci64_start
[0], 64, pci
->w64
.begin
);
907 ACPI_BUILD_SET_LE(start
, size
, acpi_pci64_end
[0], 64, pci
->w64
.end
- 1);
908 ACPI_BUILD_SET_LE(start
, size
, acpi_pci64_length
[0], 64, pci
->w64
.end
- pci
->w64
.begin
);
910 ACPI_BUILD_SET_LE(start
, size
, acpi_pci64_valid
[0], 8, 0);
915 build_ssdt(GArray
*table_data
, GArray
*linker
,
916 AcpiCpuInfo
*cpu
, AcpiPmInfo
*pm
, AcpiMiscInfo
*misc
,
917 PcPciInfo
*pci
, PcGuestInfo
*guest_info
)
919 MachineState
*machine
= MACHINE(qdev_get_machine());
920 uint32_t nr_mem
= machine
->ram_slots
;
921 unsigned acpi_cpus
= guest_info
->apic_id_limit
;
922 int ssdt_start
= table_data
->len
;
926 /* The current AML generator can cover the APIC ID range [0..255],
927 * inclusive, for VCPU hotplug. */
928 QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT
> 256);
929 g_assert(acpi_cpus
<= ACPI_CPU_HOTPLUG_ID_LIMIT
);
931 /* Copy header and patch values in the S3_ / S4_ / S5_ packages */
932 ssdt_ptr
= acpi_data_push(table_data
, sizeof(ssdp_misc_aml
));
933 memcpy(ssdt_ptr
, ssdp_misc_aml
, sizeof(ssdp_misc_aml
));
934 if (pm
->s3_disabled
) {
935 ssdt_ptr
[acpi_s3_name
[0]] = 'X';
937 if (pm
->s4_disabled
) {
938 ssdt_ptr
[acpi_s4_name
[0]] = 'X';
940 ssdt_ptr
[acpi_s4_pkg
[0] + 1] = ssdt_ptr
[acpi_s4_pkg
[0] + 3] =
944 patch_pci_windows(pci
, ssdt_ptr
, sizeof(ssdp_misc_aml
));
946 ACPI_BUILD_SET_LE(ssdt_ptr
, sizeof(ssdp_misc_aml
),
947 ssdt_isa_pest
[0], 16, misc
->pvpanic_port
);
949 ACPI_BUILD_SET_LE(ssdt_ptr
, sizeof(ssdp_misc_aml
),
950 ssdt_mctrl_nr_slots
[0], 32, nr_mem
);
953 GArray
*sb_scope
= build_alloc_array();
954 uint8_t op
= 0x10; /* ScopeOp */
956 build_append_namestring(sb_scope
, "_SB");
958 /* build Processor object for each processor */
959 for (i
= 0; i
< acpi_cpus
; i
++) {
960 uint8_t *proc
= acpi_data_push(sb_scope
, ACPI_PROC_SIZEOF
);
961 memcpy(proc
, ACPI_PROC_AML
, ACPI_PROC_SIZEOF
);
962 proc
[ACPI_PROC_OFFSET_CPUHEX
] = acpi_get_hex(i
>> 4);
963 proc
[ACPI_PROC_OFFSET_CPUHEX
+1] = acpi_get_hex(i
);
964 proc
[ACPI_PROC_OFFSET_CPUID1
] = i
;
965 proc
[ACPI_PROC_OFFSET_CPUID2
] = i
;
969 * Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...}
971 /* Arg0 = Processor ID = APIC ID */
972 build_append_notify_method(sb_scope
, "NTFY", "CP%0.02X", acpi_cpus
);
974 /* build "Name(CPON, Package() { One, One, ..., Zero, Zero, ... })" */
975 build_append_byte(sb_scope
, 0x08); /* NameOp */
976 build_append_namestring(sb_scope
, "CPON");
979 GArray
*package
= build_alloc_array();
983 * Note: The ability to create variable-sized packages was first introduced in ACPI 2.0. ACPI 1.0 only
984 * allowed fixed-size packages with up to 255 elements.
985 * Windows guests up to win2k8 fail when VarPackageOp is used.
987 if (acpi_cpus
<= 255) {
988 op
= 0x12; /* PackageOp */
989 build_append_byte(package
, acpi_cpus
); /* NumElements */
991 op
= 0x13; /* VarPackageOp */
992 build_append_int(package
, acpi_cpus
); /* VarNumElements */
995 for (i
= 0; i
< acpi_cpus
; i
++) {
996 uint8_t b
= test_bit(i
, cpu
->found_cpus
) ? 0x01 : 0x00;
997 build_append_byte(package
, b
);
1000 build_package(package
, op
);
1001 build_append_array(sb_scope
, package
);
1002 build_free_array(package
);
1006 assert(nr_mem
<= ACPI_MAX_RAM_SLOTS
);
1007 /* build memory devices */
1008 for (i
= 0; i
< nr_mem
; i
++) {
1010 uint8_t *mem
= acpi_data_push(sb_scope
, ACPI_MEM_SIZEOF
);
1012 snprintf(id
, sizeof(id
), "%02X", i
);
1013 memcpy(mem
, ACPI_MEM_AML
, ACPI_MEM_SIZEOF
);
1014 memcpy(mem
+ ACPI_MEM_OFFSET_HEX
, id
, 2);
1015 memcpy(mem
+ ACPI_MEM_OFFSET_ID
, id
, 2);
1018 /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) {
1019 * If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ...
1021 build_append_notify_method(sb_scope
,
1022 stringify(MEMORY_SLOT_NOTIFY_METHOD
),
1023 "MP%0.02X", nr_mem
);
1027 AcpiBuildPciBusHotplugState hotplug_state
;
1032 pci_host
= object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE
, &ambiguous
);
1033 if (!ambiguous
&& pci_host
) {
1034 bus
= PCI_HOST_BRIDGE(pci_host
)->bus
;
1037 build_pci_bus_state_init(&hotplug_state
, NULL
, pm
->pcihp_bridge_en
);
1040 /* Scan all PCI buses. Generate tables to support hotplug. */
1041 pci_for_each_bus_depth_first(bus
, build_pci_bus_begin
,
1042 build_pci_bus_end
, &hotplug_state
);
1045 build_append_array(sb_scope
, hotplug_state
.device_table
);
1046 build_pci_bus_state_cleanup(&hotplug_state
);
1048 build_package(sb_scope
, op
);
1049 build_append_array(table_data
, sb_scope
);
1050 build_free_array(sb_scope
);
1053 build_header(linker
, table_data
,
1054 (void *)(table_data
->data
+ ssdt_start
),
1055 "SSDT", table_data
->len
- ssdt_start
, 1);
1059 build_hpet(GArray
*table_data
, GArray
*linker
)
1063 hpet
= acpi_data_push(table_data
, sizeof(*hpet
));
1064 /* Note timer_block_id value must be kept in sync with value advertised by
1067 hpet
->timer_block_id
= cpu_to_le32(0x8086a201);
1068 hpet
->addr
.address
= cpu_to_le64(HPET_BASE
);
1069 build_header(linker
, table_data
,
1070 (void *)hpet
, "HPET", sizeof(*hpet
), 1);
1074 build_tpm_tcpa(GArray
*table_data
, GArray
*linker
, GArray
*tcpalog
)
1076 Acpi20Tcpa
*tcpa
= acpi_data_push(table_data
, sizeof *tcpa
);
1077 uint64_t log_area_start_address
= acpi_data_len(tcpalog
);
1079 tcpa
->platform_class
= cpu_to_le16(TPM_TCPA_ACPI_CLASS_CLIENT
);
1080 tcpa
->log_area_minimum_length
= cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE
);
1081 tcpa
->log_area_start_address
= cpu_to_le64(log_area_start_address
);
1083 bios_linker_loader_alloc(linker
, ACPI_BUILD_TPMLOG_FILE
, 1,
1084 false /* high memory */);
1086 /* log area start address to be filled by Guest linker */
1087 bios_linker_loader_add_pointer(linker
, ACPI_BUILD_TABLE_FILE
,
1088 ACPI_BUILD_TPMLOG_FILE
,
1089 table_data
, &tcpa
->log_area_start_address
,
1090 sizeof(tcpa
->log_area_start_address
));
1092 build_header(linker
, table_data
,
1093 (void *)tcpa
, "TCPA", sizeof(*tcpa
), 2);
1095 acpi_data_push(tcpalog
, TPM_LOG_AREA_MINIMUM_SIZE
);
1099 build_tpm_ssdt(GArray
*table_data
, GArray
*linker
)
1103 tpm_ptr
= acpi_data_push(table_data
, sizeof(ssdt_tpm_aml
));
1104 memcpy(tpm_ptr
, ssdt_tpm_aml
, sizeof(ssdt_tpm_aml
));
/*
 * Flags field of an SRAT Memory Affinity structure (ACPI spec 5.0,
 * Table 5-69).  NOTE(review): the `typedef enum {` opener was lost in the
 * corrupted source and has been restored.
 */
typedef enum {
    MEM_AFFINITY_NOFLAGS      = 0,        /* entry is a placeholder */
    MEM_AFFINITY_ENABLED      = (1 << 0), /* range is usable */
    MEM_AFFINITY_HOTPLUGGABLE = (1 << 1), /* range supports hotplug */
    MEM_AFFINITY_NON_VOLATILE = (1 << 2), /* range is non-volatile */
} MemoryAffinityFlags;
1115 acpi_build_srat_memory(AcpiSratMemoryAffinity
*numamem
, uint64_t base
,
1116 uint64_t len
, int node
, MemoryAffinityFlags flags
)
1118 numamem
->type
= ACPI_SRAT_MEMORY
;
1119 numamem
->length
= sizeof(*numamem
);
1120 memset(numamem
->proximity
, 0, 4);
1121 numamem
->proximity
[0] = node
;
1122 numamem
->flags
= cpu_to_le32(flags
);
1123 numamem
->base_addr
= cpu_to_le64(base
);
1124 numamem
->range_length
= cpu_to_le64(len
);
1128 build_srat(GArray
*table_data
, GArray
*linker
, PcGuestInfo
*guest_info
)
1130 AcpiSystemResourceAffinityTable
*srat
;
1131 AcpiSratProcessorAffinity
*core
;
1132 AcpiSratMemoryAffinity
*numamem
;
1136 int srat_start
, numa_start
, slots
;
1137 uint64_t mem_len
, mem_base
, next_base
;
1138 PCMachineState
*pcms
= PC_MACHINE(qdev_get_machine());
1139 ram_addr_t hotplugabble_address_space_size
=
1140 object_property_get_int(OBJECT(pcms
), PC_MACHINE_MEMHP_REGION_SIZE
,
1143 srat_start
= table_data
->len
;
1145 srat
= acpi_data_push(table_data
, sizeof *srat
);
1146 srat
->reserved1
= cpu_to_le32(1);
1147 core
= (void *)(srat
+ 1);
1149 for (i
= 0; i
< guest_info
->apic_id_limit
; ++i
) {
1150 core
= acpi_data_push(table_data
, sizeof *core
);
1151 core
->type
= ACPI_SRAT_PROCESSOR
;
1152 core
->length
= sizeof(*core
);
1153 core
->local_apic_id
= i
;
1154 curnode
= guest_info
->node_cpu
[i
];
1155 core
->proximity_lo
= curnode
;
1156 memset(core
->proximity_hi
, 0, 3);
1157 core
->local_sapic_eid
= 0;
1158 core
->flags
= cpu_to_le32(1);
1162 /* the memory map is a bit tricky, it contains at least one hole
1163 * from 640k-1M and possibly another one from 3.5G-4G.
1166 numa_start
= table_data
->len
;
1168 numamem
= acpi_data_push(table_data
, sizeof *numamem
);
1169 acpi_build_srat_memory(numamem
, 0, 640*1024, 0, MEM_AFFINITY_ENABLED
);
1170 next_base
= 1024 * 1024;
1171 for (i
= 1; i
< guest_info
->numa_nodes
+ 1; ++i
) {
1172 mem_base
= next_base
;
1173 mem_len
= guest_info
->node_mem
[i
- 1];
1175 mem_len
-= 1024 * 1024;
1177 next_base
= mem_base
+ mem_len
;
1179 /* Cut out the ACPI_PCI hole */
1180 if (mem_base
<= guest_info
->ram_size_below_4g
&&
1181 next_base
> guest_info
->ram_size_below_4g
) {
1182 mem_len
-= next_base
- guest_info
->ram_size_below_4g
;
1184 numamem
= acpi_data_push(table_data
, sizeof *numamem
);
1185 acpi_build_srat_memory(numamem
, mem_base
, mem_len
, i
- 1,
1186 MEM_AFFINITY_ENABLED
);
1188 mem_base
= 1ULL << 32;
1189 mem_len
= next_base
- guest_info
->ram_size_below_4g
;
1190 next_base
+= (1ULL << 32) - guest_info
->ram_size_below_4g
;
1192 numamem
= acpi_data_push(table_data
, sizeof *numamem
);
1193 acpi_build_srat_memory(numamem
, mem_base
, mem_len
, i
- 1,
1194 MEM_AFFINITY_ENABLED
);
1196 slots
= (table_data
->len
- numa_start
) / sizeof *numamem
;
1197 for (; slots
< guest_info
->numa_nodes
+ 2; slots
++) {
1198 numamem
= acpi_data_push(table_data
, sizeof *numamem
);
1199 acpi_build_srat_memory(numamem
, 0, 0, 0, MEM_AFFINITY_NOFLAGS
);
1203 * Entry is required for Windows to enable memory hotplug in OS.
1204 * Memory devices may override proximity set by this entry,
1205 * providing _PXM method if necessary.
1207 if (hotplugabble_address_space_size
) {
1208 numamem
= acpi_data_push(table_data
, sizeof *numamem
);
1209 acpi_build_srat_memory(numamem
, pcms
->hotplug_memory_base
,
1210 hotplugabble_address_space_size
, 0,
1211 MEM_AFFINITY_HOTPLUGGABLE
|
1212 MEM_AFFINITY_ENABLED
);
1215 build_header(linker
, table_data
,
1216 (void *)(table_data
->data
+ srat_start
),
1218 table_data
->len
- srat_start
, 1);
1222 build_mcfg_q35(GArray
*table_data
, GArray
*linker
, AcpiMcfgInfo
*info
)
1224 AcpiTableMcfg
*mcfg
;
1226 int len
= sizeof(*mcfg
) + 1 * sizeof(mcfg
->allocation
[0]);
1228 mcfg
= acpi_data_push(table_data
, len
);
1229 mcfg
->allocation
[0].address
= cpu_to_le64(info
->mcfg_base
);
1230 /* Only a single allocation so no need to play with segments */
1231 mcfg
->allocation
[0].pci_segment
= cpu_to_le16(0);
1232 mcfg
->allocation
[0].start_bus_number
= 0;
1233 mcfg
->allocation
[0].end_bus_number
= PCIE_MMCFG_BUS(info
->mcfg_size
- 1);
1235 /* MCFG is used for ECAM which can be enabled or disabled by guest.
1236 * To avoid table size changes (which create migration issues),
1237 * always create the table even if there are no allocations,
1238 * but set the signature to a reserved value in this case.
1239 * ACPI spec requires OSPMs to ignore such tables.
1241 if (info
->mcfg_base
== PCIE_BASE_ADDR_UNMAPPED
) {
1242 /* Reserved signature: ignored by OSPM */
1247 build_header(linker
, table_data
, (void *)mcfg
, sig
, len
, 1);
1251 build_dmar_q35(GArray
*table_data
, GArray
*linker
)
1253 int dmar_start
= table_data
->len
;
1255 AcpiTableDmar
*dmar
;
1256 AcpiDmarHardwareUnit
*drhd
;
1258 dmar
= acpi_data_push(table_data
, sizeof(*dmar
));
1259 dmar
->host_address_width
= VTD_HOST_ADDRESS_WIDTH
- 1;
1260 dmar
->flags
= 0; /* No intr_remap for now */
1262 /* DMAR Remapping Hardware Unit Definition structure */
1263 drhd
= acpi_data_push(table_data
, sizeof(*drhd
));
1264 drhd
->type
= cpu_to_le16(ACPI_DMAR_TYPE_HARDWARE_UNIT
);
1265 drhd
->length
= cpu_to_le16(sizeof(*drhd
)); /* No device scope now */
1266 drhd
->flags
= ACPI_DMAR_INCLUDE_PCI_ALL
;
1267 drhd
->pci_segment
= cpu_to_le16(0);
1268 drhd
->address
= cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR
);
1270 build_header(linker
, table_data
, (void *)(table_data
->data
+ dmar_start
),
1271 "DMAR", table_data
->len
- dmar_start
, 1);
1275 build_dsdt(GArray
*table_data
, GArray
*linker
, AcpiMiscInfo
*misc
)
1277 AcpiTableHeader
*dsdt
;
1279 assert(misc
->dsdt_code
&& misc
->dsdt_size
);
1281 dsdt
= acpi_data_push(table_data
, misc
->dsdt_size
);
1282 memcpy(dsdt
, misc
->dsdt_code
, misc
->dsdt_size
);
1284 memset(dsdt
, 0, sizeof *dsdt
);
1285 build_header(linker
, table_data
, dsdt
, "DSDT",
1286 misc
->dsdt_size
, 1);
1289 /* Build final rsdt table */
1291 build_rsdt(GArray
*table_data
, GArray
*linker
, GArray
*table_offsets
)
1293 AcpiRsdtDescriptorRev1
*rsdt
;
1297 rsdt_len
= sizeof(*rsdt
) + sizeof(uint32_t) * table_offsets
->len
;
1298 rsdt
= acpi_data_push(table_data
, rsdt_len
);
1299 memcpy(rsdt
->table_offset_entry
, table_offsets
->data
,
1300 sizeof(uint32_t) * table_offsets
->len
);
1301 for (i
= 0; i
< table_offsets
->len
; ++i
) {
1302 /* rsdt->table_offset_entry to be filled by Guest linker */
1303 bios_linker_loader_add_pointer(linker
,
1304 ACPI_BUILD_TABLE_FILE
,
1305 ACPI_BUILD_TABLE_FILE
,
1306 table_data
, &rsdt
->table_offset_entry
[i
],
1309 build_header(linker
, table_data
,
1310 (void *)rsdt
, "RSDT", rsdt_len
, 1);
1314 build_rsdp(GArray
*rsdp_table
, GArray
*linker
, unsigned rsdt
)
1316 AcpiRsdpDescriptor
*rsdp
= acpi_data_push(rsdp_table
, sizeof *rsdp
);
1318 bios_linker_loader_alloc(linker
, ACPI_BUILD_RSDP_FILE
, 16,
1319 true /* fseg memory */);
1321 memcpy(&rsdp
->signature
, "RSD PTR ", 8);
1322 memcpy(rsdp
->oem_id
, ACPI_BUILD_APPNAME6
, 6);
1323 rsdp
->rsdt_physical_address
= cpu_to_le32(rsdt
);
1324 /* Address to be filled by Guest linker */
1325 bios_linker_loader_add_pointer(linker
, ACPI_BUILD_RSDP_FILE
,
1326 ACPI_BUILD_TABLE_FILE
,
1327 rsdp_table
, &rsdp
->rsdt_physical_address
,
1328 sizeof rsdp
->rsdt_physical_address
);
1330 /* Checksum to be filled by Guest linker */
1331 bios_linker_loader_add_checksum(linker
, ACPI_BUILD_RSDP_FILE
,
1332 rsdp
, rsdp
, sizeof *rsdp
, &rsdp
->checksum
);
1338 struct AcpiBuildTables
{
1345 static inline void acpi_build_tables_init(AcpiBuildTables
*tables
)
1347 tables
->rsdp
= g_array_new(false, true /* clear */, 1);
1348 tables
->table_data
= g_array_new(false, true /* clear */, 1);
1349 tables
->tcpalog
= g_array_new(false, true /* clear */, 1);
1350 tables
->linker
= bios_linker_loader_init();
1353 static inline void acpi_build_tables_cleanup(AcpiBuildTables
*tables
, bool mfre
)
1355 void *linker_data
= bios_linker_loader_cleanup(tables
->linker
);
1356 g_free(linker_data
);
1357 g_array_free(tables
->rsdp
, mfre
);
1358 g_array_free(tables
->table_data
, true);
1359 g_array_free(tables
->tcpalog
, mfre
);
1363 struct AcpiBuildState
{
1364 /* Copy of table in RAM (for patching). */
1365 ram_addr_t table_ram
;
1366 uint32_t table_size
;
1367 /* Is table patched? */
1369 PcGuestInfo
*guest_info
;
1371 ram_addr_t linker_ram
;
1372 uint32_t linker_size
;
1375 static bool acpi_get_mcfg(AcpiMcfgInfo
*mcfg
)
1381 pci_host
= object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE
, &ambiguous
);
1382 g_assert(!ambiguous
);
1385 o
= object_property_get_qobject(pci_host
, PCIE_HOST_MCFG_BASE
, NULL
);
1389 mcfg
->mcfg_base
= qint_get_int(qobject_to_qint(o
));
1392 o
= object_property_get_qobject(pci_host
, PCIE_HOST_MCFG_SIZE
, NULL
);
1394 mcfg
->mcfg_size
= qint_get_int(qobject_to_qint(o
));
1399 static bool acpi_has_iommu(void)
1402 Object
*intel_iommu
;
1404 intel_iommu
= object_resolve_path_type("", TYPE_INTEL_IOMMU_DEVICE
,
1406 return intel_iommu
&& !ambiguous
;
1410 void acpi_build(PcGuestInfo
*guest_info
, AcpiBuildTables
*tables
)
1412 GArray
*table_offsets
;
1413 unsigned facs
, ssdt
, dsdt
, rsdt
;
1422 acpi_get_cpu_info(&cpu
);
1423 acpi_get_pm_info(&pm
);
1424 acpi_get_dsdt(&misc
);
1425 acpi_get_misc_info(&misc
);
1426 acpi_get_pci_info(&pci
);
1428 table_offsets
= g_array_new(false, true /* clear */,
1430 ACPI_BUILD_DPRINTF("init ACPI tables\n");
1432 bios_linker_loader_alloc(tables
->linker
, ACPI_BUILD_TABLE_FILE
,
1433 64 /* Ensure FACS is aligned */,
1434 false /* high memory */);
1437 * FACS is pointed to by FADT.
1438 * We place it first since it's the only table that has alignment
1441 facs
= tables
->table_data
->len
;
1442 build_facs(tables
->table_data
, tables
->linker
, guest_info
);
1444 /* DSDT is pointed to by FADT */
1445 dsdt
= tables
->table_data
->len
;
1446 build_dsdt(tables
->table_data
, tables
->linker
, &misc
);
1448 /* Count the size of the DSDT and SSDT, we will need it for legacy
1449 * sizing of ACPI tables.
1451 aml_len
+= tables
->table_data
->len
- dsdt
;
1453 /* ACPI tables pointed to by RSDT */
1454 acpi_add_table(table_offsets
, tables
->table_data
);
1455 build_fadt(tables
->table_data
, tables
->linker
, &pm
, facs
, dsdt
);
1457 ssdt
= tables
->table_data
->len
;
1458 acpi_add_table(table_offsets
, tables
->table_data
);
1459 build_ssdt(tables
->table_data
, tables
->linker
, &cpu
, &pm
, &misc
, &pci
,
1461 aml_len
+= tables
->table_data
->len
- ssdt
;
1463 acpi_add_table(table_offsets
, tables
->table_data
);
1464 build_madt(tables
->table_data
, tables
->linker
, &cpu
, guest_info
);
1466 if (misc
.has_hpet
) {
1467 acpi_add_table(table_offsets
, tables
->table_data
);
1468 build_hpet(tables
->table_data
, tables
->linker
);
1471 acpi_add_table(table_offsets
, tables
->table_data
);
1472 build_tpm_tcpa(tables
->table_data
, tables
->linker
, tables
->tcpalog
);
1474 acpi_add_table(table_offsets
, tables
->table_data
);
1475 build_tpm_ssdt(tables
->table_data
, tables
->linker
);
1477 if (guest_info
->numa_nodes
) {
1478 acpi_add_table(table_offsets
, tables
->table_data
);
1479 build_srat(tables
->table_data
, tables
->linker
, guest_info
);
1481 if (acpi_get_mcfg(&mcfg
)) {
1482 acpi_add_table(table_offsets
, tables
->table_data
);
1483 build_mcfg_q35(tables
->table_data
, tables
->linker
, &mcfg
);
1485 if (acpi_has_iommu()) {
1486 acpi_add_table(table_offsets
, tables
->table_data
);
1487 build_dmar_q35(tables
->table_data
, tables
->linker
);
1490 /* Add tables supplied by user (if any) */
1491 for (u
= acpi_table_first(); u
; u
= acpi_table_next(u
)) {
1492 unsigned len
= acpi_table_len(u
);
1494 acpi_add_table(table_offsets
, tables
->table_data
);
1495 g_array_append_vals(tables
->table_data
, u
, len
);
1498 /* RSDT is pointed to by RSDP */
1499 rsdt
= tables
->table_data
->len
;
1500 build_rsdt(tables
->table_data
, tables
->linker
, table_offsets
);
1502 /* RSDP is in FSEG memory, so allocate it separately */
1503 build_rsdp(tables
->rsdp
, tables
->linker
, rsdt
);
1505 /* We'll expose it all to Guest so we want to reduce
1506 * chance of size changes.
1508 * We used to align the tables to 4k, but of course this would
1509 * too simple to be enough. 4k turned out to be too small an
1510 * alignment very soon, and in fact it is almost impossible to
1511 * keep the table size stable for all (max_cpus, max_memory_slots)
1512 * combinations. So the table size is always 64k for pc-i440fx-2.1
1513 * and we give an error if the table grows beyond that limit.
1515 * We still have the problem of migrating from "-M pc-i440fx-2.0". For
1516 * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables
1517 * than 2.0 and we can always pad the smaller tables with zeros. We can
1518 * then use the exact size of the 2.0 tables.
1520 * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration.
1522 if (guest_info
->legacy_acpi_table_size
) {
1523 /* Subtracting aml_len gives the size of fixed tables. Then add the
1524 * size of the PIIX4 DSDT/SSDT in QEMU 2.0.
1526 int legacy_aml_len
=
1527 guest_info
->legacy_acpi_table_size
+
1528 ACPI_BUILD_LEGACY_CPU_AML_SIZE
* max_cpus
;
1529 int legacy_table_size
=
1530 ROUND_UP(tables
->table_data
->len
- aml_len
+ legacy_aml_len
,
1531 ACPI_BUILD_ALIGN_SIZE
);
1532 if (tables
->table_data
->len
> legacy_table_size
) {
1533 /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */
1534 error_report("Warning: migration may not work.");
1536 g_array_set_size(tables
->table_data
, legacy_table_size
);
1538 /* Make sure we have a buffer in case we need to resize the tables. */
1539 if (tables
->table_data
->len
> ACPI_BUILD_TABLE_SIZE
/ 2) {
1540 /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */
1541 error_report("Warning: ACPI tables are larger than 64k.");
1542 error_report("Warning: migration may not work.");
1543 error_report("Warning: please remove CPUs, NUMA nodes, "
1544 "memory slots or PCI bridges.");
1546 acpi_align_size(tables
->table_data
, ACPI_BUILD_TABLE_SIZE
);
1549 acpi_align_size(tables
->linker
, ACPI_BUILD_ALIGN_SIZE
);
1551 /* Cleanup memory that's no longer used. */
1552 g_array_free(table_offsets
, true);
1555 static void acpi_build_update(void *build_opaque
, uint32_t offset
)
1557 AcpiBuildState
*build_state
= build_opaque
;
1558 AcpiBuildTables tables
;
1560 /* No state to update or already patched? Nothing to do. */
1561 if (!build_state
|| build_state
->patched
) {
1564 build_state
->patched
= 1;
1566 acpi_build_tables_init(&tables
);
1568 acpi_build(build_state
->guest_info
, &tables
);
1570 assert(acpi_data_len(tables
.table_data
) == build_state
->table_size
);
1572 /* Make sure RAM size is correct - in case it got changed by migration */
1573 qemu_ram_resize(build_state
->table_ram
, build_state
->table_size
,
1576 memcpy(qemu_get_ram_ptr(build_state
->table_ram
), tables
.table_data
->data
,
1577 build_state
->table_size
);
1578 memcpy(build_state
->rsdp
, tables
.rsdp
->data
, acpi_data_len(tables
.rsdp
));
1579 memcpy(qemu_get_ram_ptr(build_state
->linker_ram
), tables
.linker
->data
,
1580 build_state
->linker_size
);
1582 cpu_physical_memory_set_dirty_range_nocode(build_state
->table_ram
,
1583 build_state
->table_size
);
1585 acpi_build_tables_cleanup(&tables
, true);
1588 static void acpi_build_reset(void *build_opaque
)
1590 AcpiBuildState
*build_state
= build_opaque
;
1591 build_state
->patched
= 0;
1594 static ram_addr_t
acpi_add_rom_blob(AcpiBuildState
*build_state
, GArray
*blob
,
1595 const char *name
, uint64_t max_size
)
1597 return rom_add_blob(name
, blob
->data
, acpi_data_len(blob
), max_size
, -1,
1598 name
, acpi_build_update
, build_state
);
1601 static const VMStateDescription vmstate_acpi_build
= {
1602 .name
= "acpi_build",
1604 .minimum_version_id
= 1,
1605 .fields
= (VMStateField
[]) {
1606 VMSTATE_UINT8(patched
, AcpiBuildState
),
1607 VMSTATE_END_OF_LIST()
1611 void acpi_setup(PcGuestInfo
*guest_info
)
1613 AcpiBuildTables tables
;
1614 AcpiBuildState
*build_state
;
1616 if (!guest_info
->fw_cfg
) {
1617 ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n");
1621 if (!guest_info
->has_acpi_build
) {
1622 ACPI_BUILD_DPRINTF("ACPI build disabled. Bailing out.\n");
1626 if (!acpi_enabled
) {
1627 ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n");
1631 build_state
= g_malloc0(sizeof *build_state
);
1633 build_state
->guest_info
= guest_info
;
1635 acpi_set_pci_info();
1637 acpi_build_tables_init(&tables
);
1638 acpi_build(build_state
->guest_info
, &tables
);
1640 /* Now expose it all to Guest */
1641 build_state
->table_ram
= acpi_add_rom_blob(build_state
, tables
.table_data
,
1642 ACPI_BUILD_TABLE_FILE
,
1643 ACPI_BUILD_TABLE_MAX_SIZE
);
1644 assert(build_state
->table_ram
!= RAM_ADDR_MAX
);
1645 build_state
->table_size
= acpi_data_len(tables
.table_data
);
1647 build_state
->linker_ram
=
1648 acpi_add_rom_blob(build_state
, tables
.linker
, "etc/table-loader", 0);
1649 build_state
->linker_size
= acpi_data_len(tables
.linker
);
1651 fw_cfg_add_file(guest_info
->fw_cfg
, ACPI_BUILD_TPMLOG_FILE
,
1652 tables
.tcpalog
->data
, acpi_data_len(tables
.tcpalog
));
1655 * Though RSDP is small, its contents isn't immutable, so
1656 * update it along with the rest of tables on guest access.
1658 fw_cfg_add_file_callback(guest_info
->fw_cfg
, ACPI_BUILD_RSDP_FILE
,
1659 acpi_build_update
, build_state
,
1660 tables
.rsdp
->data
, acpi_data_len(tables
.rsdp
));
1662 build_state
->rsdp
= tables
.rsdp
->data
;
1664 qemu_register_reset(acpi_build_reset
, build_state
);
1665 acpi_build_reset(build_state
);
1666 vmstate_register(NULL
, 0, &vmstate_acpi_build
, build_state
);
1668 /* Cleanup tables but don't free the memory: we track it
1671 acpi_build_tables_cleanup(&tables
, false);