/* Support for generating ACPI tables and passing them to Guests
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "acpi-build.h"

#include <stddef.h>
#include <glib.h>
#include "qemu-common.h"
#include "qemu/bitmap.h"
#include "qemu/osdep.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "hw/pci/pci.h"
#include "qom/cpu.h"
#include "hw/i386/pc.h"
#include "target-i386/cpu.h"
#include "hw/timer/hpet.h"
#include "hw/i386/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/isa/isa.h"
#include "hw/acpi/memory_hotplug.h"
#include "sysemu/tpm.h"
#include "hw/acpi/tpm.h"

/* Supported chipsets: */
#include "hw/acpi/piix4.h"
#include "hw/acpi/pcihp.h"
#include "hw/i386/ich9.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci-host/q35.h"
#include "hw/i386/intel_iommu.h"

#include "hw/i386/q35-acpi-dsdt.hex"
#include "hw/i386/acpi-dsdt.hex"

#include "hw/acpi/aml-build.h"

#include "qapi/qmp/qint.h"
#include "qom/qom-qobject.h"
#include "exec/ram_addr.h"
/* These are used to size the ACPI tables for -M pc-i440fx-1.7 and
 * -M pc-i440fx-2.0.  Even if the actual amount of AML generated grows
 * a little bit, there should be plenty of free space since the DSDT
 * shrunk by ~1.5k between QEMU 2.0 and QEMU 2.1.
 */
#define ACPI_BUILD_LEGACY_CPU_AML_SIZE    97
#define ACPI_BUILD_ALIGN_SIZE             0x1000

#define ACPI_BUILD_TABLE_SIZE             0x20000

/* Reserve RAM space for tables: add another order of magnitude. */
#define ACPI_BUILD_TABLE_MAX_SIZE         0x200000
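
/* A note on the two limits above (inferred from how they are used later in
 * this file): the generated table blob is padded up to ACPI_BUILD_TABLE_SIZE
 * (64k) by acpi_align_size() so its size stays stable across reboots and
 * migration, while ACPI_BUILD_TABLE_MAX_SIZE (2M) is only the ceiling passed
 * to rom_add_blob() for the fw_cfg ROM holding the blob, leaving headroom in
 * case the tables need to be resized at update time.
 */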
/* #define DEBUG_ACPI_BUILD */
#ifdef DEBUG_ACPI_BUILD
#define ACPI_BUILD_DPRINTF(fmt, ...)        \
    do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0)
#else
#define ACPI_BUILD_DPRINTF(fmt, ...)
#endif
typedef struct AcpiCpuInfo {
    DECLARE_BITMAP(found_cpus, ACPI_CPU_HOTPLUG_ID_LIMIT);
} AcpiCpuInfo;

typedef struct AcpiMcfgInfo {
    uint64_t mcfg_base;
    uint32_t mcfg_size;
} AcpiMcfgInfo;

typedef struct AcpiPmInfo {
    bool s3_disabled;
    bool s4_disabled;
    bool pcihp_bridge_en;
    uint8_t s4_val;
    uint16_t sci_int;
    uint8_t acpi_enable_cmd;
    uint8_t acpi_disable_cmd;
    uint32_t io_base;
    uint32_t gpe0_blk;
    uint32_t gpe0_blk_len;
    uint16_t cpu_hp_io_base;
    uint16_t cpu_hp_io_len;
    uint16_t mem_hp_io_base;
    uint16_t mem_hp_io_len;
    uint16_t pcihp_io_base;
    uint16_t pcihp_io_len;
} AcpiPmInfo;

typedef struct AcpiMiscInfo {
    bool has_hpet;
    bool has_tpm;
    const unsigned char *dsdt_code;
    unsigned dsdt_size;
    uint16_t pvpanic_port;
    uint16_t applesmc_io_base;
} AcpiMiscInfo;

typedef struct AcpiBuildPciBusHotplugState {
    GArray *device_table;
    GArray *notify_table;
    struct AcpiBuildPciBusHotplugState *parent;
    bool pcihp_bridge_en;
} AcpiBuildPciBusHotplugState;
static void acpi_get_dsdt(AcpiMiscInfo *info)
{
    Object *piix = piix4_pm_find();
    Object *lpc = ich9_lpc_find();
    assert(!!piix != !!lpc);

    if (piix) {
        info->dsdt_code = AcpiDsdtAmlCode;
        info->dsdt_size = sizeof AcpiDsdtAmlCode;
    }
    if (lpc) {
        info->dsdt_code = Q35AcpiDsdtAmlCode;
        info->dsdt_size = sizeof Q35AcpiDsdtAmlCode;
    }
}
static int acpi_add_cpu_info(Object *o, void *opaque)
{
    AcpiCpuInfo *cpu = opaque;
    uint64_t apic_id;

    if (object_dynamic_cast(o, TYPE_CPU)) {
        apic_id = object_property_get_int(o, "apic-id", NULL);
        assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);

        set_bit(apic_id, cpu->found_cpus);
    }

    object_child_foreach(o, acpi_add_cpu_info, opaque);
    return 0;
}
static void acpi_get_cpu_info(AcpiCpuInfo *cpu)
{
    Object *root = object_get_root();

    memset(cpu->found_cpus, 0, sizeof cpu->found_cpus);
    object_child_foreach(root, acpi_add_cpu_info, cpu);
}
static void acpi_get_pm_info(AcpiPmInfo *pm)
{
    Object *piix = piix4_pm_find();
    Object *lpc = ich9_lpc_find();
    Object *obj = NULL;
    QObject *o;

    pm->pcihp_io_base = 0;
    pm->pcihp_io_len = 0;
    if (piix) {
        obj = piix;
        pm->cpu_hp_io_base = PIIX4_CPU_HOTPLUG_IO_BASE;
        pm->pcihp_io_base =
            object_property_get_int(obj, ACPI_PCIHP_IO_BASE_PROP, NULL);
        pm->pcihp_io_len =
            object_property_get_int(obj, ACPI_PCIHP_IO_LEN_PROP, NULL);
    }
    if (lpc) {
        obj = lpc;
        pm->cpu_hp_io_base = ICH9_CPU_HOTPLUG_IO_BASE;
    }
    assert(obj);

    pm->cpu_hp_io_len = ACPI_GPE_PROC_LEN;
    pm->mem_hp_io_base = ACPI_MEMORY_HOTPLUG_BASE;
    pm->mem_hp_io_len = ACPI_MEMORY_HOTPLUG_IO_LEN;

    /* Fill in optional s3/s4 related properties */
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
    if (o) {
        pm->s3_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s3_disabled = false;
    }
    qobject_decref(o);
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_DISABLED, NULL);
    if (o) {
        pm->s4_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_disabled = false;
    }
    qobject_decref(o);
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_VAL, NULL);
    if (o) {
        pm->s4_val = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_val = false;
    }
    qobject_decref(o);

    /* Fill in mandatory properties */
    pm->sci_int = object_property_get_int(obj, ACPI_PM_PROP_SCI_INT, NULL);

    pm->acpi_enable_cmd = object_property_get_int(obj,
                                                  ACPI_PM_PROP_ACPI_ENABLE_CMD,
                                                  NULL);
    pm->acpi_disable_cmd = object_property_get_int(obj,
                                                  ACPI_PM_PROP_ACPI_DISABLE_CMD,
                                                  NULL);
    pm->io_base = object_property_get_int(obj, ACPI_PM_PROP_PM_IO_BASE,
                                          NULL);
    pm->gpe0_blk = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK,
                                           NULL);
    pm->gpe0_blk_len = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK_LEN,
                                               NULL);
    pm->pcihp_bridge_en =
        object_property_get_bool(obj, "acpi-pci-hotplug-with-bridge-support",
                                 NULL);
}
static void acpi_get_misc_info(AcpiMiscInfo *info)
{
    info->has_hpet = hpet_find();
    info->has_tpm = tpm_find();
    info->pvpanic_port = pvpanic_port();
    info->applesmc_io_base = applesmc_port();
}
static void acpi_get_pci_info(PcPciInfo *info)
{
    Object *pci_host;
    bool ambiguous;

    pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
    g_assert(!ambiguous);
    g_assert(pci_host);

    info->w32.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE_START,
                                              NULL);
    info->w32.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE_END,
                                            NULL);
    info->w64.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE64_START,
                                              NULL);
    info->w64.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE64_END,
                                            NULL);
}
#define ACPI_BUILD_APPNAME  "Bochs"
#define ACPI_BUILD_APPNAME6 "BOCHS "
#define ACPI_BUILD_APPNAME4 "BXPC"

#define ACPI_BUILD_TABLE_FILE "etc/acpi/tables"
#define ACPI_BUILD_RSDP_FILE "etc/acpi/rsdp"
#define ACPI_BUILD_TPMLOG_FILE "etc/tpm/log"
static void
build_header(GArray *linker, GArray *table_data,
             AcpiTableHeader *h, const char *sig, int len, uint8_t rev)
{
    memcpy(&h->signature, sig, 4);
    h->length = cpu_to_le32(len);
    h->revision = rev;
    memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6);
    memcpy(h->oem_table_id, ACPI_BUILD_APPNAME4, 4);
    memcpy(h->oem_table_id + 4, sig, 4);
    h->oem_revision = cpu_to_le32(1);
    memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME4, 4);
    h->asl_compiler_revision = cpu_to_le32(1);
    h->checksum = 0;
    /* Checksum to be filled in by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE,
                                    table_data->data, h, len, &h->checksum);
}
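
/* Note on the linker/loader usage above: build_header() only zeroes the
 * checksum byte and records a checksum command in the "linker" blob.  The
 * firmware-side loader is expected to run that command after it has applied
 * all pointer patches, so the checksum is computed over the final, patched
 * table contents rather than over what QEMU wrote here.
 */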
#define ACPI_PORT_SMI_CMD           0x00b2 /* TODO: this is APM_CNT_IOPORT */
static inline void *acpi_data_push(GArray *table_data, unsigned size)
{
    unsigned off = table_data->len;
    g_array_set_size(table_data, off + size);
    return table_data->data + off;
}
static unsigned acpi_data_len(GArray *table)
{
#if GLIB_CHECK_VERSION(2, 22, 0)
    assert(g_array_get_element_size(table) == 1);
#endif
    return table->len;
}
static void acpi_align_size(GArray *blob, unsigned align)
{
    /* Align size to multiple of given size. This reduces the chance
     * we need to change size in the future (breaking cross version migration).
     */
    g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
}
static inline void acpi_add_table(GArray *table_offsets, GArray *table_data)
{
    uint32_t offset = cpu_to_le32(table_data->len);
    g_array_append_val(table_offsets, offset);
}
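
/* Each entry appended to table_offsets is just the byte offset of a table
 * inside the "etc/acpi/tables" blob.  build_rsdt() later copies these into
 * RSDT entries and emits one pointer-patch linker command per entry, so the
 * guest-side loader turns each offset into the table's final guest-physical
 * address once the blob has been placed in memory.
 */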
static void
build_facs(GArray *table_data, GArray *linker, PcGuestInfo *guest_info)
{
    AcpiFacsDescriptorRev1 *facs = acpi_data_push(table_data, sizeof *facs);
    memcpy(&facs->signature, "FACS", 4);
    facs->length = cpu_to_le32(sizeof(*facs));
}
/* Load chipset information in FADT */
static void fadt_setup(AcpiFadtDescriptorRev1 *fadt, AcpiPmInfo *pm)
{
    fadt->sci_int = cpu_to_le16(pm->sci_int);
    fadt->smi_cmd = cpu_to_le32(ACPI_PORT_SMI_CMD);
    fadt->acpi_enable = pm->acpi_enable_cmd;
    fadt->acpi_disable = pm->acpi_disable_cmd;
    /* EVT, CNT, TMR offset matches hw/acpi/core.c */
    fadt->pm1a_evt_blk = cpu_to_le32(pm->io_base);
    fadt->pm1a_cnt_blk = cpu_to_le32(pm->io_base + 0x04);
    fadt->pm_tmr_blk = cpu_to_le32(pm->io_base + 0x08);
    fadt->gpe0_blk = cpu_to_le32(pm->gpe0_blk);
    /* EVT, CNT, TMR length matches hw/acpi/core.c */
    fadt->pm1_evt_len = 4;
    fadt->pm1_cnt_len = 2;
    fadt->pm_tmr_len = 4;
    fadt->gpe0_blk_len = pm->gpe0_blk_len;
    fadt->plvl2_lat = cpu_to_le16(0xfff); /* C2 state not supported */
    fadt->plvl3_lat = cpu_to_le16(0xfff); /* C3 state not supported */
    fadt->flags = cpu_to_le32((1 << ACPI_FADT_F_WBINVD) |
                              (1 << ACPI_FADT_F_PROC_C1) |
                              (1 << ACPI_FADT_F_SLP_BUTTON) |
                              (1 << ACPI_FADT_F_RTC_S4));
    fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_USE_PLATFORM_CLOCK);
    /* APIC destination mode ("Flat Logical") has an upper limit of 8 CPUs
     * For more than 8 CPUs, "Clustered Logical" mode has to be used
     */
    if (max_cpus > 8) {
        fadt->flags |= cpu_to_le32(1 << ACPI_FADT_F_FORCE_APIC_CLUSTER_MODEL);
    }
}
static void
build_fadt(GArray *table_data, GArray *linker, AcpiPmInfo *pm,
           unsigned facs, unsigned dsdt)
{
    AcpiFadtDescriptorRev1 *fadt = acpi_data_push(table_data, sizeof(*fadt));

    fadt->firmware_ctrl = cpu_to_le32(facs);
    /* FACS address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   table_data, &fadt->firmware_ctrl,
                                   sizeof fadt->firmware_ctrl);

    fadt->dsdt = cpu_to_le32(dsdt);
    /* DSDT address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   table_data, &fadt->dsdt,
                                   sizeof fadt->dsdt);

    fadt_setup(fadt, pm);

    build_header(linker, table_data,
                 (void *)fadt, "FACP", sizeof(*fadt), 1);
}
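
/* The FADT is what ties the other tables together: firmware_ctrl and dsdt
 * above are written as blob offsets of the FACS and DSDT, and the two
 * pointer-patch commands ask the guest-side loader to rewrite them as real
 * physical addresses after the blob is loaded.
 */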
static void
build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu,
           PcGuestInfo *guest_info)
{
    int madt_start = table_data->len;

    AcpiMultipleApicTable *madt;
    AcpiMadtIoApic *io_apic;
    AcpiMadtIntsrcovr *intsrcovr;
    AcpiMadtLocalNmi *local_nmi;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);
    madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS);
    madt->flags = cpu_to_le32(1);

    for (i = 0; i < guest_info->apic_id_limit; i++) {
        AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic);
        apic->type = ACPI_APIC_PROCESSOR;
        apic->length = sizeof(*apic);
        apic->processor_id = i;
        apic->local_apic_id = i;
        if (test_bit(i, cpu->found_cpus)) {
            apic->flags = cpu_to_le32(1);
        } else {
            apic->flags = cpu_to_le32(0);
        }
    }
    io_apic = acpi_data_push(table_data, sizeof *io_apic);
    io_apic->type = ACPI_APIC_IO;
    io_apic->length = sizeof(*io_apic);
#define ACPI_BUILD_IOAPIC_ID 0x0
    io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID;
    io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS);
    io_apic->interrupt = cpu_to_le32(0);

    if (guest_info->apic_xrupt_override) {
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type   = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = 0;
        intsrcovr->gsi    = cpu_to_le32(2);
        intsrcovr->flags  = cpu_to_le16(0); /* conforms to bus specifications */
    }
    for (i = 1; i < 16; i++) {
#define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11))
        if (!(ACPI_BUILD_PCI_IRQS & (1 << i))) {
            /* No need for an INT source override structure. */
            continue;
        }
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type   = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = i;
        intsrcovr->gsi    = cpu_to_le32(i);
        intsrcovr->flags  = cpu_to_le16(0xd); /* active high, level triggered */
    }

    local_nmi = acpi_data_push(table_data, sizeof *local_nmi);
    local_nmi->type         = ACPI_APIC_LOCAL_NMI;
    local_nmi->length       = sizeof(*local_nmi);
    local_nmi->processor_id = 0xff; /* all processors */
    local_nmi->flags        = cpu_to_le16(0);
    local_nmi->lint         = 1; /* ACPI_LINT1 */

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 1);
}
/* Encode a hex value */
static inline char acpi_get_hex(uint32_t val)
{
    val &= 0x0f;
    return (val <= 9) ? ('0' + val) : ('A' + val - 10);
}
#define ACPI_SSDT_SIGNATURE  0x54445353 /* SSDT */
#define ACPI_SSDT_HEADER_LENGTH 36

#include "hw/i386/ssdt-tpm.hex"
/* Assign BSEL property to all buses.  In the future, this can be changed
 * to only assign to buses that support hotplug.
 */
static void *acpi_set_bsel(PCIBus *bus, void *opaque)
{
    unsigned *bsel_alloc = opaque;
    unsigned *bus_bsel;

    if (qbus_is_hotpluggable(BUS(bus))) {
        bus_bsel = g_malloc(sizeof *bus_bsel);

        *bus_bsel = (*bsel_alloc)++;
        object_property_add_uint32_ptr(OBJECT(bus), ACPI_PCIHP_PROP_BSEL,
                                       bus_bsel, NULL);
    }

    return bsel_alloc;
}

static void acpi_set_pci_info(void)
{
    PCIBus *bus = find_i440fx(); /* TODO: Q35 support */
    unsigned bsel_alloc = 0;

    if (bus) {
        /* Scan all PCI buses. Set property to enable acpi based hotplug. */
        pci_for_each_bus_depth_first(bus, acpi_set_bsel, NULL, &bsel_alloc);
    }
}
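
/* The bsel value assigned above is what the generated AML later stores into
 * BNUM before invoking DVNT/PCEJ: it selects which bus the PCI hotplug I/O
 * registers operate on, so every hotpluggable bus needs a unique number.
 */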
static void build_append_pcihp_notify_entry(Aml *method, int slot)
{
    Aml *if_ctx;
    int32_t devfn = PCI_DEVFN(slot, 0);

    if_ctx = aml_if(aml_and(aml_arg(0), aml_int(0x1U << slot)));
    aml_append(if_ctx, aml_notify(aml_name("S%.02X", devfn), aml_arg(1)));
    aml_append(method, if_ctx);
}
static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus,
                                         bool pcihp_bridge_en)
{
    Aml *dev, *notify_method, *method;
    QObject *bsel;
    PCIBus *sec;
    int i;

    bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL);
    if (bsel) {
        int64_t bsel_val = qint_get_int(qobject_to_qint(bsel));

        aml_append(parent_scope, aml_name_decl("BSEL", aml_int(bsel_val)));
        notify_method = aml_method("DVNT", 2);
    }

    for (i = 0; i < ARRAY_SIZE(bus->devices); i += PCI_FUNC_MAX) {
        DeviceClass *dc;
        PCIDeviceClass *pc;
        PCIDevice *pdev = bus->devices[i];
        int slot = PCI_SLOT(i);
        bool hotplug_enabled_dev;
        bool bridge_in_acpi;

        if (!pdev) {
            if (bsel) { /* add hotplug slots for non present devices */
                dev = aml_device("S%.02X", PCI_DEVFN(slot, 0));
                aml_append(dev, aml_name_decl("_SUN", aml_int(slot)));
                aml_append(dev, aml_name_decl("_ADR", aml_int(slot << 16)));
                method = aml_method("_EJ0", 1);
                aml_append(method,
                    aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN"))
                );
                aml_append(dev, method);
                aml_append(parent_scope, dev);

                build_append_pcihp_notify_entry(notify_method, slot);
            }
            continue;
        }

        pc = PCI_DEVICE_GET_CLASS(pdev);
        dc = DEVICE_GET_CLASS(pdev);

        /* When hotplug for bridges is enabled, bridges are
         * described in ACPI separately (see build_pci_bus_end).
         * In this case they aren't themselves hot-pluggable.
         * Hotplugged bridges *are* hot-pluggable.
         */
        bridge_in_acpi = pc->is_bridge && pcihp_bridge_en &&
            !DEVICE(pdev)->hotplugged;

        hotplug_enabled_dev = bsel && dc->hotpluggable && !bridge_in_acpi;

        if (pc->class_id == PCI_CLASS_BRIDGE_ISA) {
            continue;
        }

        /* start to compose PCI slot descriptor */
        dev = aml_device("S%.02X", PCI_DEVFN(slot, 0));
        aml_append(dev, aml_name_decl("_ADR", aml_int(slot << 16)));

        if (pc->class_id == PCI_CLASS_DISPLAY_VGA) {
            /* add VGA specific AML methods */
            int s3d;

            if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) {
                s3d = 3;
            } else {
                s3d = 0;
            }

            method = aml_method("_S1D", 0);
            aml_append(method, aml_return(aml_int(0)));
            aml_append(dev, method);

            method = aml_method("_S2D", 0);
            aml_append(method, aml_return(aml_int(0)));
            aml_append(dev, method);

            method = aml_method("_S3D", 0);
            aml_append(method, aml_return(aml_int(s3d)));
            aml_append(dev, method);
        } else if (hotplug_enabled_dev) {
            /* add _SUN/_EJ0 to make slot hotpluggable */
            aml_append(dev, aml_name_decl("_SUN", aml_int(slot)));

            method = aml_method("_EJ0", 1);
            aml_append(method,
                aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN"))
            );
            aml_append(dev, method);

            build_append_pcihp_notify_entry(notify_method, slot);
        } else if (bridge_in_acpi) {
            /*
             * device is coldplugged bridge,
             * add child device descriptions into its scope
             */
            PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));

            build_append_pci_bus_devices(dev, sec_bus, pcihp_bridge_en);
        }
        /* slot descriptor has been composed, add it into parent context */
        aml_append(parent_scope, dev);
    }

    if (bsel) {
        aml_append(parent_scope, notify_method);
    }

    /* Append PCNT method to notify about events on local and child buses.
     * Add unconditionally for root since DSDT expects it.
     */
    method = aml_method("PCNT", 0);

    /* If bus supports hotplug select it and notify about local events */
    if (bsel) {
        int64_t bsel_val = qint_get_int(qobject_to_qint(bsel));
        aml_append(method, aml_store(aml_int(bsel_val), aml_name("BNUM")));
        aml_append(method,
            aml_call2("DVNT", aml_name("PCIU"), aml_int(1) /* Device Check */)
        );
        aml_append(method,
            aml_call2("DVNT", aml_name("PCID"), aml_int(3)/* Eject Request */)
        );
    }

    /* Notify about child bus events in any case */
    if (pcihp_bridge_en) {
        QLIST_FOREACH(sec, &bus->child, sibling) {
            int32_t devfn = sec->parent_dev->devfn;

            aml_append(method, aml_name("^S%.02X.PCNT", devfn));
        }
    }
    aml_append(parent_scope, method);
}
static void
build_ssdt(GArray *table_data, GArray *linker,
           AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc,
           PcPciInfo *pci, PcGuestInfo *guest_info)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    uint32_t nr_mem = machine->ram_slots;
    unsigned acpi_cpus = guest_info->apic_id_limit;
    Aml *ssdt, *sb_scope, *scope, *pkg, *dev, *method, *crs, *field, *ifctx;
    int i;

    ssdt = init_aml_allocator();
    /* The current AML generator can cover the APIC ID range [0..255],
     * inclusive, for VCPU hotplug. */
    QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
    g_assert(acpi_cpus <= ACPI_CPU_HOTPLUG_ID_LIMIT);

    /* Reserve space for header */
    acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader));
    scope = aml_scope("\\_SB.PCI0");
    /* build PCI0._CRS */
    crs = aml_resource_template();
    aml_append(crs,
        aml_word_bus_number(aml_min_fixed, aml_max_fixed, aml_pos_decode,
                            0x0000, 0x0000, 0x00FF, 0x0000, 0x0100));
    aml_append(crs, aml_io(aml_decode16, 0x0CF8, 0x0CF8, 0x01, 0x08));

    aml_append(crs,
        aml_word_io(aml_min_fixed, aml_max_fixed,
                    aml_pos_decode, aml_entire_range,
                    0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8));
    aml_append(crs,
        aml_word_io(aml_min_fixed, aml_max_fixed,
                    aml_pos_decode, aml_entire_range,
                    0x0000, 0x0D00, 0xFFFF, 0x0000, 0xF300));
    aml_append(crs,
        aml_dword_memory(aml_pos_decode, aml_min_fixed, aml_max_fixed,
                         aml_cacheable, aml_ReadWrite,
                         0, 0x000A0000, 0x000BFFFF, 0, 0x00020000));
    aml_append(crs,
        aml_dword_memory(aml_pos_decode, aml_min_fixed, aml_max_fixed,
                         aml_non_cacheable, aml_ReadWrite,
                         0, pci->w32.begin, pci->w32.end - 1, 0,
                         pci->w32.end - pci->w32.begin));
    if (pci->w64.begin) {
        aml_append(crs,
            aml_qword_memory(aml_pos_decode, aml_min_fixed, aml_max_fixed,
                             aml_cacheable, aml_ReadWrite,
                             0, pci->w64.begin, pci->w64.end - 1, 0,
                             pci->w64.end - pci->w64.begin));
    }
    aml_append(scope, aml_name_decl("_CRS", crs));

    /* reserve GPE0 block resources */
    dev = aml_device("GPE0");
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06")));
    aml_append(dev, aml_name_decl("_UID", aml_string("GPE0 resources")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    crs = aml_resource_template();
    aml_append(crs,
        aml_io(aml_decode16, pm->gpe0_blk, pm->gpe0_blk, 1, pm->gpe0_blk_len)
    );
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    /* reserve PCIHP resources */
    if (pm->pcihp_io_len) {
        dev = aml_device("PHPR");
        aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06")));
        aml_append(dev,
            aml_name_decl("_UID", aml_string("PCI Hotplug resources")));
        /* device present, functioning, decoding, not shown in UI */
        aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
        crs = aml_resource_template();
        aml_append(crs,
            aml_io(aml_decode16, pm->pcihp_io_base, pm->pcihp_io_base, 1,
                   pm->pcihp_io_len)
        );
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
    }
    aml_append(ssdt, scope);
    /* create S3_ / S4_ / S5_ packages if necessary */
    scope = aml_scope("\\");
    if (!pm->s3_disabled) {
        pkg = aml_package(4);
        aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */
        aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(scope, aml_name_decl("_S3", pkg));
    }

    if (!pm->s4_disabled) {
        pkg = aml_package(4);
        aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */
        /* PM1b_CNT.SLP_TYP, FIXME: not impl. */
        aml_append(pkg, aml_int(pm->s4_val));
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(scope, aml_name_decl("_S4", pkg));
    }

    pkg = aml_package(4);
    aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */
    aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */
    aml_append(pkg, aml_int(0)); /* reserved */
    aml_append(pkg, aml_int(0)); /* reserved */
    aml_append(scope, aml_name_decl("_S5", pkg));
    aml_append(ssdt, scope);
    if (misc->applesmc_io_base) {
        scope = aml_scope("\\_SB.PCI0.ISA");
        dev = aml_device("SMC");

        aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0001")));
        /* device present, functioning, decoding, not shown in UI */
        aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));

        crs = aml_resource_template();
        aml_append(crs,
            aml_io(aml_decode16, misc->applesmc_io_base, misc->applesmc_io_base,
                   0x01, APPLESMC_MAX_DATA_LENGTH)
        );
        aml_append(crs, aml_irq_no_flags(6));
        aml_append(dev, aml_name_decl("_CRS", crs));

        aml_append(scope, dev);
        aml_append(ssdt, scope);
    }

    if (misc->pvpanic_port) {
        scope = aml_scope("\\_SB.PCI0.ISA");

        dev = aml_device("PEVR");
        aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));

        crs = aml_resource_template();
        aml_append(crs,
            aml_io(aml_decode16, misc->pvpanic_port, misc->pvpanic_port, 1, 1)
        );
        aml_append(dev, aml_name_decl("_CRS", crs));

        aml_append(dev, aml_operation_region("PEOR", aml_system_io,
                                             misc->pvpanic_port, 1));
        field = aml_field("PEOR", aml_byte_acc);
        aml_append(field, aml_named_field("PEPT", 8));
        aml_append(dev, field);

        method = aml_method("RDPT", 0);
        aml_append(method, aml_store(aml_name("PEPT"), aml_local(0)));
        aml_append(method, aml_return(aml_local(0)));
        aml_append(dev, method);

        method = aml_method("WRPT", 1);
        aml_append(method, aml_store(aml_arg(0), aml_name("PEPT")));
        aml_append(dev, method);

        aml_append(scope, dev);
        aml_append(ssdt, scope);
    }
    sb_scope = aml_scope("_SB");

    /* create PCI0.PRES device and its _CRS to reserve CPU hotplug MMIO */
    dev = aml_device("PCI0." stringify(CPU_HOTPLUG_RESOURCE_DEVICE));
    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A06")));
    aml_append(dev,
        aml_name_decl("_UID", aml_string("CPU Hotplug resources"))
    );
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    crs = aml_resource_template();
    aml_append(crs,
        aml_io(aml_decode16, pm->cpu_hp_io_base, pm->cpu_hp_io_base, 1,
               pm->cpu_hp_io_len)
    );
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(sb_scope, dev);
    /* declare CPU hotplug MMIO region and PRS field to access it */
    aml_append(sb_scope, aml_operation_region(
        "PRST", aml_system_io, pm->cpu_hp_io_base, pm->cpu_hp_io_len));
    field = aml_field("PRST", aml_byte_acc);
    aml_append(field, aml_named_field("PRS", 256));
    aml_append(sb_scope, field);

    /* build Processor object for each processor */
    for (i = 0; i < acpi_cpus; i++) {
        dev = aml_processor(i, 0, 0, "CP%.02X", i);

        method = aml_method("_MAT", 0);
        aml_append(method, aml_return(aml_call1("CPMA", aml_int(i))));
        aml_append(dev, method);

        method = aml_method("_STA", 0);
        aml_append(method, aml_return(aml_call1("CPST", aml_int(i))));
        aml_append(dev, method);

        method = aml_method("_EJ0", 1);
        aml_append(method,
            aml_return(aml_call2("CPEJ", aml_int(i), aml_arg(0)))
        );
        aml_append(dev, method);

        aml_append(sb_scope, dev);
    }

    /* build this code:
     *   Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...}
     */
    /* Arg0 = Processor ID = APIC ID */
    method = aml_method("NTFY", 2);
    for (i = 0; i < acpi_cpus; i++) {
        ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i)));
        aml_append(ifctx,
            aml_notify(aml_name("CP%.02X", i), aml_arg(1))
        );
        aml_append(method, ifctx);
    }
    aml_append(sb_scope, method);

    /* build "Name(CPON, Package() { One, One, ..., Zero, Zero, ... })"
     *
     * Note: The ability to create variable-sized packages was first
     * introduced in ACPI 2.0. ACPI 1.0 only allowed fixed-size packages
     * with up to 255 elements. Windows guests up to win2k8 fail when
     * VarPackageOp is used.
     */
    pkg = acpi_cpus <= 255 ? aml_package(acpi_cpus) :
                             aml_varpackage(acpi_cpus);

    for (i = 0; i < acpi_cpus; i++) {
        uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00;
        aml_append(pkg, aml_int(b));
    }
    aml_append(sb_scope, aml_name_decl("CPON", pkg));
    /* build memory devices */
    assert(nr_mem <= ACPI_MAX_RAM_SLOTS);
    scope = aml_scope("\\_SB.PCI0." stringify(MEMORY_HOTPLUG_DEVICE));
    aml_append(scope,
        aml_name_decl(stringify(MEMORY_SLOTS_NUMBER), aml_int(nr_mem))
    );

    crs = aml_resource_template();
    aml_append(crs,
        aml_io(aml_decode16, pm->mem_hp_io_base, pm->mem_hp_io_base, 0,
               pm->mem_hp_io_len)
    );
    aml_append(scope, aml_name_decl("_CRS", crs));

    aml_append(scope, aml_operation_region(
        stringify(MEMORY_HOTPLUG_IO_REGION), aml_system_io,
        pm->mem_hp_io_base, pm->mem_hp_io_len)
    );

    field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), aml_dword_acc);
    aml_append(field, /* read only */
               aml_named_field(stringify(MEMORY_SLOT_ADDR_LOW), 32));
    aml_append(field, /* read only */
               aml_named_field(stringify(MEMORY_SLOT_ADDR_HIGH), 32));
    aml_append(field, /* read only */
               aml_named_field(stringify(MEMORY_SLOT_SIZE_LOW), 32));
    aml_append(field, /* read only */
               aml_named_field(stringify(MEMORY_SLOT_SIZE_HIGH), 32));
    aml_append(field, /* read only */
               aml_named_field(stringify(MEMORY_SLOT_PROXIMITY), 32));
    aml_append(scope, field);

    field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), aml_byte_acc);
    aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */));
    aml_append(field, /* 1 if enabled, read only */
               aml_named_field(stringify(MEMORY_SLOT_ENABLED), 1));
    aml_append(field,
               /* (read) 1 if it has an insert event. (write) 1 to clear event */
               aml_named_field(stringify(MEMORY_SLOT_INSERT_EVENT), 1));
    aml_append(scope, field);

    field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), aml_dword_acc);
    aml_append(field, /* DIMM selector, write only */
               aml_named_field(stringify(MEMORY_SLOT_SLECTOR), 32));
    aml_append(field, /* _OST event code, write only */
               aml_named_field(stringify(MEMORY_SLOT_OST_EVENT), 32));
    aml_append(field, /* _OST status code, write only */
               aml_named_field(stringify(MEMORY_SLOT_OST_STATUS), 32));
    aml_append(scope, field);

    aml_append(sb_scope, scope);
    for (i = 0; i < nr_mem; i++) {
#define BASEPATH "\\_SB.PCI0." stringify(MEMORY_HOTPLUG_DEVICE) "."
        const char *s;

        dev = aml_device("MP%02X", i);
        aml_append(dev, aml_name_decl("_UID", aml_string("0x%02X", i)));
        aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C80")));

        method = aml_method("_CRS", 0);
        s = BASEPATH stringify(MEMORY_SLOT_CRS_METHOD);
        aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
        aml_append(dev, method);

        method = aml_method("_STA", 0);
        s = BASEPATH stringify(MEMORY_SLOT_STATUS_METHOD);
        aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
        aml_append(dev, method);

        method = aml_method("_PXM", 0);
        s = BASEPATH stringify(MEMORY_SLOT_PROXIMITY_METHOD);
        aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
        aml_append(dev, method);

        method = aml_method("_OST", 3);
        s = BASEPATH stringify(MEMORY_SLOT_OST_METHOD);
        aml_append(method, aml_return(aml_call4(
            s, aml_name("_UID"), aml_arg(0), aml_arg(1), aml_arg(2)
        )));
        aml_append(dev, method);

        aml_append(sb_scope, dev);
    }

    /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) {
     *     If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ...
     */
    method = aml_method(stringify(MEMORY_SLOT_NOTIFY_METHOD), 2);
    for (i = 0; i < nr_mem; i++) {
        ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i)));
        aml_append(ifctx,
            aml_notify(aml_name("MP%.02X", i), aml_arg(1))
        );
        aml_append(method, ifctx);
    }
    aml_append(sb_scope, method);
    {
        Object *pci_host;
        PCIBus *bus = NULL;
        bool ambiguous;

        pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE,
                                            &ambiguous);
        if (!ambiguous && pci_host) {
            bus = PCI_HOST_BRIDGE(pci_host)->bus;
        }

        if (bus) {
            Aml *scope = aml_scope("PCI0");
            /* Scan all PCI buses. Generate tables to support hotplug. */
            build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en);
            aml_append(sb_scope, scope);
        }
    }
    aml_append(ssdt, sb_scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - ssdt->buf->len),
        "SSDT", ssdt->buf->len, 1);
    free_aml_allocator();
}
static void
build_hpet(GArray *table_data, GArray *linker)
{
    Acpi20Hpet *hpet;

    hpet = acpi_data_push(table_data, sizeof(*hpet));
    /* Note timer_block_id value must be kept in sync with value advertised by
     * emulated hpet
     */
    hpet->timer_block_id = cpu_to_le32(0x8086a201);
    hpet->addr.address = cpu_to_le64(HPET_BASE);
    build_header(linker, table_data,
                 (void *)hpet, "HPET", sizeof(*hpet), 1);
}
static void
build_tpm_tcpa(GArray *table_data, GArray *linker, GArray *tcpalog)
{
    Acpi20Tcpa *tcpa = acpi_data_push(table_data, sizeof *tcpa);
    uint64_t log_area_start_address = acpi_data_len(tcpalog);

    tcpa->platform_class = cpu_to_le16(TPM_TCPA_ACPI_CLASS_CLIENT);
    tcpa->log_area_minimum_length = cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE);
    tcpa->log_area_start_address = cpu_to_le64(log_area_start_address);

    bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, 1,
                             false /* high memory */);

    /* log area start address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TPMLOG_FILE,
                                   table_data, &tcpa->log_area_start_address,
                                   sizeof(tcpa->log_area_start_address));

    build_header(linker, table_data,
                 (void *)tcpa, "TCPA", sizeof(*tcpa), 2);

    acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE);
}
static void
build_tpm_ssdt(GArray *table_data, GArray *linker)
{
    void *tpm_ptr;

    tpm_ptr = acpi_data_push(table_data, sizeof(ssdt_tpm_aml));
    memcpy(tpm_ptr, ssdt_tpm_aml, sizeof(ssdt_tpm_aml));
}
typedef enum {
    MEM_AFFINITY_NOFLAGS      = 0,
    MEM_AFFINITY_ENABLED      = (1 << 0),
    MEM_AFFINITY_HOTPLUGGABLE = (1 << 1),
    MEM_AFFINITY_NON_VOLATILE = (1 << 2),
} MemoryAffinityFlags;
static void
acpi_build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
                       uint64_t len, int node, MemoryAffinityFlags flags)
{
    numamem->type = ACPI_SRAT_MEMORY;
    numamem->length = sizeof(*numamem);
    memset(numamem->proximity, 0, 4);
    numamem->proximity[0] = node;
    numamem->flags = cpu_to_le32(flags);
    numamem->base_addr = cpu_to_le64(base);
    numamem->range_length = cpu_to_le64(len);
}
static void
build_srat(GArray *table_data, GArray *linker, PcGuestInfo *guest_info)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorAffinity *core;
    AcpiSratMemoryAffinity *numamem;

    int i;
    uint64_t curnode;
    int srat_start, numa_start, slots;
    uint64_t mem_len, mem_base, next_base;
    PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
    ram_addr_t hotplugabble_address_space_size =
        object_property_get_int(OBJECT(pcms), PC_MACHINE_MEMHP_REGION_SIZE,
                                NULL);

    srat_start = table_data->len;

    srat = acpi_data_push(table_data, sizeof *srat);
    srat->reserved1 = cpu_to_le32(1);
    core = (void *)(srat + 1);

    for (i = 0; i < guest_info->apic_id_limit; ++i) {
        core = acpi_data_push(table_data, sizeof *core);
        core->type = ACPI_SRAT_PROCESSOR;
        core->length = sizeof(*core);
        core->local_apic_id = i;
        curnode = guest_info->node_cpu[i];
        core->proximity_lo = curnode;
        memset(core->proximity_hi, 0, 3);
        core->local_sapic_eid = 0;
        core->flags = cpu_to_le32(1);
    }

    /* the memory map is a bit tricky, it contains at least one hole
     * from 640k-1M and possibly another one from 3.5G-4G.
     */
    numa_start = table_data->len;

    numamem = acpi_data_push(table_data, sizeof *numamem);
    acpi_build_srat_memory(numamem, 0, 640*1024, 0, MEM_AFFINITY_ENABLED);
    next_base = 1024 * 1024;
    for (i = 1; i < guest_info->numa_nodes + 1; ++i) {
        mem_base = next_base;
        mem_len = guest_info->node_mem[i - 1];
        if (i == 1) {
            mem_len -= 1024 * 1024;
        }
        next_base = mem_base + mem_len;

        /* Cut out the ACPI_PCI hole */
        if (mem_base <= guest_info->ram_size_below_4g &&
            next_base > guest_info->ram_size_below_4g) {
            mem_len -= next_base - guest_info->ram_size_below_4g;
            if (mem_len > 0) {
                numamem = acpi_data_push(table_data, sizeof *numamem);
                acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
                                       MEM_AFFINITY_ENABLED);
            }
            mem_base = 1ULL << 32;
            mem_len = next_base - guest_info->ram_size_below_4g;
            next_base += (1ULL << 32) - guest_info->ram_size_below_4g;
        }
        numamem = acpi_data_push(table_data, sizeof *numamem);
        acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
                               MEM_AFFINITY_ENABLED);
    }
    slots = (table_data->len - numa_start) / sizeof *numamem;
    for (; slots < guest_info->numa_nodes + 2; slots++) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        acpi_build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
    }

    /*
     * Entry is required for Windows to enable memory hotplug in OS.
     * Memory devices may override proximity set by this entry,
     * providing _PXM method if necessary.
     */
    if (hotplugabble_address_space_size) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        acpi_build_srat_memory(numamem, pcms->hotplug_memory_base,
                               hotplugabble_address_space_size, 0,
                               MEM_AFFINITY_HOTPLUGGABLE |
                               MEM_AFFINITY_ENABLED);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + srat_start),
                 "SRAT",
                 table_data->len - srat_start, 1);
}
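
/* Worked example for the hole handling above (numbers are illustrative):
 * with ram_size_below_4g = 3.5G and a single 6G node, the loop emits one
 * memory affinity entry covering [1M, 3.5G) and a second covering
 * [4G, 6.5G), so the PCI hole at [3.5G, 4G) is never described as RAM.
 */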
static void
build_mcfg_q35(GArray *table_data, GArray *linker, AcpiMcfgInfo *info)
{
    AcpiTableMcfg *mcfg;
    const char *sig;
    int len = sizeof(*mcfg) + 1 * sizeof(mcfg->allocation[0]);

    mcfg = acpi_data_push(table_data, len);
    mcfg->allocation[0].address = cpu_to_le64(info->mcfg_base);
    /* Only a single allocation so no need to play with segments */
    mcfg->allocation[0].pci_segment = cpu_to_le16(0);
    mcfg->allocation[0].start_bus_number = 0;
    mcfg->allocation[0].end_bus_number = PCIE_MMCFG_BUS(info->mcfg_size - 1);

    /* MCFG is used for ECAM which can be enabled or disabled by guest.
     * To avoid table size changes (which create migration issues),
     * always create the table even if there are no allocations,
     * but set the signature to a reserved value in this case.
     * ACPI spec requires OSPMs to ignore such tables.
     */
    if (info->mcfg_base == PCIE_BASE_ADDR_UNMAPPED) {
        /* Reserved signature: ignored by OSPM */
        sig = "QEMU";
    } else {
        sig = "MCFG";
    }
    build_header(linker, table_data, (void *)mcfg, sig, len, 1);
}
static void
build_dmar_q35(GArray *table_data, GArray *linker)
{
    int dmar_start = table_data->len;

    AcpiTableDmar *dmar;
    AcpiDmarHardwareUnit *drhd;

    dmar = acpi_data_push(table_data, sizeof(*dmar));
    dmar->host_address_width = VTD_HOST_ADDRESS_WIDTH - 1;
    dmar->flags = 0;    /* No intr_remap for now */

    /* DMAR Remapping Hardware Unit Definition structure */
    drhd = acpi_data_push(table_data, sizeof(*drhd));
    drhd->type = cpu_to_le16(ACPI_DMAR_TYPE_HARDWARE_UNIT);
    drhd->length = cpu_to_le16(sizeof(*drhd));   /* No device scope now */
    drhd->flags = ACPI_DMAR_INCLUDE_PCI_ALL;
    drhd->pci_segment = cpu_to_le16(0);
    drhd->address = cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR);

    build_header(linker, table_data, (void *)(table_data->data + dmar_start),
                 "DMAR", table_data->len - dmar_start, 1);
}
static void
build_dsdt(GArray *table_data, GArray *linker, AcpiMiscInfo *misc)
{
    AcpiTableHeader *dsdt;

    assert(misc->dsdt_code && misc->dsdt_size);

    dsdt = acpi_data_push(table_data, misc->dsdt_size);
    memcpy(dsdt, misc->dsdt_code, misc->dsdt_size);

    memset(dsdt, 0, sizeof *dsdt);
    build_header(linker, table_data, dsdt, "DSDT",
                 misc->dsdt_size, 1);
}
/* Build final rsdt table */
static void
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
{
    AcpiRsdtDescriptorRev1 *rsdt;
    size_t rsdt_len;
    int i;

    rsdt_len = sizeof(*rsdt) + sizeof(uint32_t) * table_offsets->len;
    rsdt = acpi_data_push(table_data, rsdt_len);
    memcpy(rsdt->table_offset_entry, table_offsets->data,
           sizeof(uint32_t) * table_offsets->len);
    for (i = 0; i < table_offsets->len; ++i) {
        /* rsdt->table_offset_entry to be filled by Guest linker */
        bios_linker_loader_add_pointer(linker,
                                       ACPI_BUILD_TABLE_FILE,
                                       ACPI_BUILD_TABLE_FILE,
                                       table_data, &rsdt->table_offset_entry[i],
                                       sizeof(uint32_t));
    }
    build_header(linker, table_data,
                 (void *)rsdt, "RSDT", rsdt_len, 1);
}
static GArray *
build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
{
    AcpiRsdpDescriptor *rsdp = acpi_data_push(rsdp_table, sizeof *rsdp);

    bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, 16,
                             true /* fseg memory */);

    memcpy(&rsdp->signature, "RSD PTR ", 8);
    memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, 6);
    rsdp->rsdt_physical_address = cpu_to_le32(rsdt);
    /* Address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_RSDP_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   rsdp_table, &rsdp->rsdt_physical_address,
                                   sizeof rsdp->rsdt_physical_address);

    /* Checksum to be filled by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
                                    rsdp, rsdp, sizeof *rsdp, &rsdp->checksum);

    return rsdp_table;
}
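
/* The RSDP lives in its own fw_cfg file ("etc/acpi/rsdp") rather than in the
 * main table blob: the firmware places it in the FSEG area, and the two
 * linker commands above patch its rsdt pointer and checksum after both blobs
 * have been loaded into guest memory.
 */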
typedef
struct AcpiBuildTables {
    GArray *table_data;
    GArray *rsdp;
    GArray *tcpalog;
    GArray *linker;
} AcpiBuildTables;

static inline void acpi_build_tables_init(AcpiBuildTables *tables)
{
    tables->rsdp = g_array_new(false, true /* clear */, 1);
    tables->table_data = g_array_new(false, true /* clear */, 1);
    tables->tcpalog = g_array_new(false, true /* clear */, 1);
    tables->linker = bios_linker_loader_init();
}

static inline void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre)
{
    void *linker_data = bios_linker_loader_cleanup(tables->linker);
    g_free(linker_data);
    g_array_free(tables->rsdp, true);
    g_array_free(tables->table_data, true);
    g_array_free(tables->tcpalog, mfre);
}
typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    ram_addr_t table_ram;
    /* Is table patched? */
    uint8_t patched;
    PcGuestInfo *guest_info;
    void *rsdp;
    ram_addr_t rsdp_ram;
    ram_addr_t linker_ram;
} AcpiBuildState;
static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg)
{
    Object *pci_host;
    QObject *o;
    bool ambiguous;

    pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
    g_assert(!ambiguous);
    g_assert(pci_host);

    o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_BASE, NULL);
    if (!o) {
        return false;
    }
    mcfg->mcfg_base = qint_get_int(qobject_to_qint(o));
    qobject_decref(o);

    o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_SIZE, NULL);
    assert(o);
    mcfg->mcfg_size = qint_get_int(qobject_to_qint(o));
    qobject_decref(o);
    return true;
}
static bool acpi_has_iommu(void)
{
    bool ambiguous;
    Object *intel_iommu;

    intel_iommu = object_resolve_path_type("", TYPE_INTEL_IOMMU_DEVICE,
                                           &ambiguous);
    return intel_iommu && !ambiguous;
}
static
void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables)
{
    GArray *table_offsets;
    unsigned facs, ssdt, dsdt, rsdt;
    AcpiCpuInfo cpu;
    AcpiPmInfo pm;
    AcpiMiscInfo misc;
    AcpiMcfgInfo mcfg;
    PcPciInfo pci;
    uint8_t *u;
    size_t aml_len = 0;
    GArray *tables_blob = tables->table_data;

    acpi_get_cpu_info(&cpu);
    acpi_get_pm_info(&pm);
    acpi_get_dsdt(&misc);
    acpi_get_misc_info(&misc);
    acpi_get_pci_info(&pci);

    table_offsets = g_array_new(false, true /* clear */,
                                sizeof(uint32_t));
    ACPI_BUILD_DPRINTF("init ACPI tables\n");
    bios_linker_loader_alloc(tables->linker, ACPI_BUILD_TABLE_FILE,
                             64 /* Ensure FACS is aligned */,
                             false /* high memory */);

    /*
     * FACS is pointed to by FADT.
     * We place it first since it's the only table that has alignment
     * requirements.
     */
    facs = tables_blob->len;
    build_facs(tables_blob, tables->linker, guest_info);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, &misc);

    /* Count the size of the DSDT and SSDT, we will need it for legacy
     * sizing of ACPI tables.
     */
    aml_len += tables_blob->len - dsdt;

    /* ACPI tables pointed to by RSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt(tables_blob, tables->linker, &pm, facs, dsdt);

    ssdt = tables_blob->len;
    acpi_add_table(table_offsets, tables_blob);
    build_ssdt(tables_blob, tables->linker, &cpu, &pm, &misc, &pci,
               guest_info);
    aml_len += tables_blob->len - ssdt;
, tables_blob
);
1437 build_madt(tables_blob
, tables
->linker
, &cpu
, guest_info
);
1439 if (misc
.has_hpet
) {
1440 acpi_add_table(table_offsets
, tables_blob
);
1441 build_hpet(tables_blob
, tables
->linker
);
1444 acpi_add_table(table_offsets
, tables_blob
);
1445 build_tpm_tcpa(tables_blob
, tables
->linker
, tables
->tcpalog
);
1447 acpi_add_table(table_offsets
, tables_blob
);
1448 build_tpm_ssdt(tables_blob
, tables
->linker
);
1450 if (guest_info
->numa_nodes
) {
1451 acpi_add_table(table_offsets
, tables_blob
);
1452 build_srat(tables_blob
, tables
->linker
, guest_info
);
1454 if (acpi_get_mcfg(&mcfg
)) {
1455 acpi_add_table(table_offsets
, tables_blob
);
1456 build_mcfg_q35(tables_blob
, tables
->linker
, &mcfg
);
1458 if (acpi_has_iommu()) {
1459 acpi_add_table(table_offsets
, tables_blob
);
1460 build_dmar_q35(tables_blob
, tables
->linker
);
    /* Add tables supplied by user (if any) */
    for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
        unsigned len = acpi_table_len(u);

        acpi_add_table(table_offsets, tables_blob);
        g_array_append_vals(tables_blob, u, len);
    }

    /* RSDT is pointed to by RSDP */
    rsdt = tables_blob->len;
    build_rsdt(tables_blob, tables->linker, table_offsets);

    /* RSDP is in FSEG memory, so allocate it separately */
    build_rsdp(tables->rsdp, tables->linker, rsdt);
    /* We'll expose it all to Guest so we want to reduce
     * chance of size changes.
     *
     * We used to align the tables to 4k, but of course this would
     * be too simple to be enough.  4k turned out to be too small an
     * alignment very soon, and in fact it is almost impossible to
     * keep the table size stable for all (max_cpus, max_memory_slots)
     * combinations.  So the table size is always 64k for pc-i440fx-2.1
     * and we give an error if the table grows beyond that limit.
     *
     * We still have the problem of migrating from "-M pc-i440fx-2.0".  For
     * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables
     * than 2.0 and we can always pad the smaller tables with zeros.  We can
     * then use the exact size of the 2.0 tables.
     *
     * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration.
     */
    if (guest_info->legacy_acpi_table_size) {
        /* Subtracting aml_len gives the size of fixed tables.  Then add the
         * size of the PIIX4 DSDT/SSDT in QEMU 2.0.
         */
        int legacy_aml_len =
            guest_info->legacy_acpi_table_size +
            ACPI_BUILD_LEGACY_CPU_AML_SIZE * max_cpus;
        int legacy_table_size =
            ROUND_UP(tables_blob->len - aml_len + legacy_aml_len,
                     ACPI_BUILD_ALIGN_SIZE);
        if (tables_blob->len > legacy_table_size) {
            /* Should happen only with PCI bridges and -M pc-i440fx-2.0.  */
            error_report("Warning: migration may not work.");
        }
        g_array_set_size(tables_blob, legacy_table_size);
    } else {
        /* Make sure we have a buffer in case we need to resize the tables. */
        if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) {
            /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */
            error_report("Warning: ACPI tables are larger than 64k.");
            error_report("Warning: migration may not work.");
            error_report("Warning: please remove CPUs, NUMA nodes, "
                         "memory slots or PCI bridges.");
        }
        acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE);
    }

    acpi_align_size(tables->linker, ACPI_BUILD_ALIGN_SIZE);

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}
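
/* Illustrative sizing arithmetic for the legacy path above (numbers are
 * assumed, not taken from a real guest): with legacy_acpi_table_size = 6652
 * bytes, max_cpus = 4 and 9000 bytes of fixed (non-AML) tables, legacy_aml_len
 * is 6652 + 97 * 4 = 7040, and legacy_table_size rounds 9000 + 7040 = 16040
 * up to 16384 (0x4000), which becomes the padded blob size.
 */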
static void acpi_ram_update(ram_addr_t ram, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed e.g. by migration */
    qemu_ram_resize(ram, size, &error_abort);

    memcpy(qemu_get_ram_ptr(ram), data->data, size);
    cpu_physical_memory_set_dirty_range_nocode(ram, size);
}
static void acpi_build_update(void *build_opaque, uint32_t offset)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = 1;

    acpi_build_tables_init(&tables);

    acpi_build(build_state->guest_info, &tables);

    acpi_ram_update(build_state->table_ram, tables.table_data);

    if (build_state->rsdp) {
        memcpy(build_state->rsdp, tables.rsdp->data, acpi_data_len(tables.rsdp));
    } else {
        acpi_ram_update(build_state->rsdp_ram, tables.rsdp);
    }

    acpi_ram_update(build_state->linker_ram, tables.linker);
    acpi_build_tables_cleanup(&tables, true);
}
static void acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = 0;
}
static ram_addr_t acpi_add_rom_blob(AcpiBuildState *build_state, GArray *blob,
                                    const char *name, uint64_t max_size)
{
    return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
                        name, acpi_build_update, build_state);
}
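
/* rom_add_blob() registers the blob as an fw_cfg file and, because a callback
 * (acpi_build_update) is passed, that callback runs when the guest selects
 * the file.  Together with acpi_build_reset() clearing "patched" on machine
 * reset, this is what lets the tables be regenerated with up-to-date
 * CPU/memory/PCI state on every (re)boot before the guest reads them.
 */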
static const VMStateDescription vmstate_acpi_build = {
    .name = "acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};
void acpi_setup(PcGuestInfo *guest_info)
{
    AcpiBuildTables tables;
    AcpiBuildState *build_state;

    if (!guest_info->fw_cfg) {
        ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n");
        return;
    }

    if (!guest_info->has_acpi_build) {
        ACPI_BUILD_DPRINTF("ACPI build disabled. Bailing out.\n");
        return;
    }

    if (!acpi_enabled) {
        ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n");
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    build_state->guest_info = guest_info;

    acpi_set_pci_info();

    acpi_build_tables_init(&tables);
    acpi_build(build_state->guest_info, &tables);

    /* Now expose it all to Guest */
    build_state->table_ram = acpi_add_rom_blob(build_state, tables.table_data,
                                               ACPI_BUILD_TABLE_FILE,
                                               ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_ram != RAM_ADDR_MAX);

    build_state->linker_ram =
        acpi_add_rom_blob(build_state, tables.linker, "etc/table-loader", 0);

    fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
                    tables.tcpalog->data, acpi_data_len(tables.tcpalog));

    if (!guest_info->rsdp_in_ram) {
        /*
         * Keep for compatibility with old machine types.
         * Though RSDP is small, its contents aren't immutable, so
         * we'll update it along with the rest of tables on guest access.
         */
        uint32_t rsdp_size = acpi_data_len(tables.rsdp);

        build_state->rsdp = g_memdup(tables.rsdp->data, rsdp_size);
        fw_cfg_add_file_callback(guest_info->fw_cfg, ACPI_BUILD_RSDP_FILE,
                                 acpi_build_update, build_state,
                                 build_state->rsdp, rsdp_size);
        build_state->rsdp_ram = (ram_addr_t)-1;
    } else {
        build_state->rsdp = NULL;
        build_state->rsdp_ram = acpi_add_rom_blob(build_state, tables.rsdp,
                                                  ACPI_BUILD_RSDP_FILE, 0);
    }

    qemu_register_reset(acpi_build_reset, build_state);
    acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}