/*
 * NVDIMM ACPI Implementation
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *  Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * NFIT is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 * and the DSM specification can be found at:
 *       http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
 *
 * Currently, it only supports PMEM Virtualization.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#include "qemu/osdep.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/mem/nvdimm.h"

static int nvdimm_plugged_device_list(Object *obj, void *opaque)
{
    GSList **list = opaque;

    if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
        DeviceState *dev = DEVICE(obj);

        if (dev->realized) { /* only realized NVDIMMs matter */
            *list = g_slist_append(*list, DEVICE(obj));
        }
    }

    object_child_foreach(obj, nvdimm_plugged_device_list, opaque);
    return 0;
}

/*
 * inquire plugged NVDIMM devices and link them into the list which is
 * returned to the caller.
 *
 * Note: it is the caller's responsibility to free the list to avoid
 * memory leak.
 */
static GSList *nvdimm_get_plugged_device_list(void)
{
    GSList *list = NULL;

    object_child_foreach(qdev_get_machine(), nvdimm_plugged_device_list,
                         &list);
    return list;
}

#define NVDIMM_UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)             \
   { (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
     (b) & 0xff, ((b) >> 8) & 0xff, (c) & 0xff, ((c) >> 8) & 0xff,          \
     (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }

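/*
 * Worked example of the byte layout (derived directly from the macro
 * above): NVDIMM_UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, ...)
 * stores the 32-bit and 16-bit fields least-significant-byte first, so
 * the array begins
 *   { 0x79, 0xd3, 0xf0, 0x66, 0xf3, 0xb4, 0x74, 0x40, 0xac, 0x43, ... }
 * which is the GUID byte order the NFIT structures expect.
 */
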
/*
 * define Byte Addressable Persistent Memory (PM) Region according to
 * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure.
 */
static const uint8_t nvdimm_nfit_spa_uuid[] =
      NVDIMM_UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33,
                     0x18, 0xb7, 0x8c, 0xdb);

/*
 * NVDIMM Firmware Interface Table
 * @signature: "NFIT"
 *
 * It provides information that allows OSPM to enumerate NVDIMM present in
 * the platform and associate system physical address ranges created by the
 * NVDIMMs.
 *
 * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 */
struct NvdimmNfitHeader {
    ACPI_TABLE_HEADER_DEF      /* standard ACPI table header */
    uint32_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitHeader NvdimmNfitHeader;

/*
 * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware
 * Interface Table (NFIT).
 */

/*
 * System Physical Address Range Structure
 *
 * It describes the system physical address ranges occupied by NVDIMMs and
 * the types of the regions.
 */
struct NvdimmNfitSpa {
    uint16_t type;
    uint16_t length;
    uint16_t spa_index;
    uint16_t flags;
    uint32_t reserved;
    uint32_t proximity_domain;
    uint8_t type_guid[16];
    uint64_t spa_base;
    uint64_t spa_length;
    uint64_t mem_attr;
} QEMU_PACKED;
typedef struct NvdimmNfitSpa NvdimmNfitSpa;

/*
 * Memory Device to System Physical Address Range Mapping Structure
 *
 * It enables identifying each NVDIMM region and the corresponding SPA
 * describing the memory interleave
 */
struct NvdimmNfitMemDev {
    uint16_t type;
    uint16_t length;
    uint32_t nfit_handle;
    uint16_t phys_id;
    uint16_t region_id;
    uint16_t spa_index;
    uint16_t dcr_index;
    uint64_t region_len;
    uint64_t region_offset;
    uint64_t region_dpa;
    uint16_t interleave_index;
    uint16_t interleave_ways;
    uint16_t flags;
    uint16_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitMemDev NvdimmNfitMemDev;

/*
 * NVDIMM Control Region Structure
 *
 * It describes the NVDIMM and if applicable, Block Control Window.
 */
struct NvdimmNfitControlRegion {
    uint16_t type;
    uint16_t length;
    uint16_t dcr_index;
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t revision_id;
    uint16_t sub_vendor_id;
    uint16_t sub_device_id;
    uint16_t sub_revision_id;
    uint8_t reserved[6];
    uint32_t serial_number;
    uint16_t fic;
    uint16_t num_bcw;
    uint64_t bcw_size;
    uint64_t cmd_offset;
    uint64_t cmd_size;
    uint64_t status_offset;
    uint64_t status_size;
    uint16_t flags;
    uint8_t reserved2[6];
} QEMU_PACKED;
typedef struct NvdimmNfitControlRegion NvdimmNfitControlRegion;

/*
 * Module serial number is a unique number for each device. We use the
 * slot id of NVDIMM device to generate this number so that each device
 * associates with a different number.
 *
 * 0x123456 is a magic number we arbitrarily chose.
 */
static uint32_t nvdimm_slot_to_sn(int slot)
{
    return 0x123456 + slot;
}

/*
 * handle is used to uniquely associate nfit_memdev structure with NVDIMM
 * ACPI device - nfit_memdev.nfit_handle matches with the value returned
 * by ACPI device _ADR method.
 *
 * We generate the handle with the slot id of NVDIMM device and reserve
 * 0 for NVDIMM root device.
 */
static uint32_t nvdimm_slot_to_handle(int slot)
{
    return slot + 1;
}

/*
 * index uniquely identifies the structure, 0 is reserved which indicates
 * that the structure is not valid or the associated structure does not
 * exist.
 *
 * Each NVDIMM device needs two indexes, one for nfit_spa and another for
 * nfit_dcr which are generated by the slot id of NVDIMM device.
 */
static uint16_t nvdimm_slot_to_spa_index(int slot)
{
    return (slot + 1) << 1;
}

/* See the comments of nvdimm_slot_to_spa_index(). */
static uint32_t nvdimm_slot_to_dcr_index(int slot)
{
    return nvdimm_slot_to_spa_index(slot) + 1;
}

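/*
 * Worked example of the slot-derived numbers above (follows directly from
 * the helpers, shown here for clarity):
 *   slot 0: serial 0x123456, handle 1, spa_index 2, dcr_index 3
 *   slot 1: serial 0x123457, handle 2, spa_index 4, dcr_index 5
 * Neither helper can produce index 0, keeping it reserved as required.
 */
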
/* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */
static void
nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
{
    NvdimmNfitSpa *nfit_spa;
    uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                            NULL);
    uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                            NULL);
    uint32_t node = object_property_get_int(OBJECT(dev), PC_DIMM_NODE_PROP,
                                            NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);

    nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa));

    nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range
                                      Structure */);
    nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa));
    nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));

    /*
     * Control region is strict as all the device info, such as SN, index,
     * is associated with slot id.
     */
    nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for
                                       management during hot add/online
                                       operation */ |
                                  2 /* Data in Proximity Domain field is
                                       valid */);

    /* NUMA node. */
    nfit_spa->proximity_domain = cpu_to_le32(node);
    /* the region reported as PMEM. */
    memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid,
           sizeof(nvdimm_nfit_spa_uuid));

    nfit_spa->spa_base = cpu_to_le64(addr);
    nfit_spa->spa_length = cpu_to_le64(size);

    /* It is the PMEM and can be cached as writeback. */
    nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ |
                                     0x8000ULL /* EFI_MEMORY_NV */);
}

/*
 * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping
 * Structure
 */
static void
nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
{
    NvdimmNfitMemDev *nfit_memdev;
    uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                            NULL);
    uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                            NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t handle = nvdimm_slot_to_handle(slot);

    nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev));

    nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address
                                         Range Map Structure */);
    nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev));
    nfit_memdev->nfit_handle = cpu_to_le32(handle);

    /*
     * associate memory device with System Physical Address Range
     * Structure.
     */
    nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
    /* associate memory device with Control Region Structure. */
    nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* The memory region on the device. */
    nfit_memdev->region_len = cpu_to_le64(size);
    nfit_memdev->region_dpa = cpu_to_le64(addr);

    /* Only one interleave for PMEM. */
    nfit_memdev->interleave_ways = cpu_to_le16(1);
}

/*
 * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure.
 */
static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
{
    NvdimmNfitControlRegion *nfit_dcr;
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t sn = nvdimm_slot_to_sn(slot);

    nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr));

    nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */);
    nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr));
    nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* vendor: Intel. */
    nfit_dcr->vendor_id = cpu_to_le16(0x8086);
    nfit_dcr->device_id = cpu_to_le16(1);

    /* The _DSM method is following Intel's DSM specification. */
    nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported
                                             in ACPI 6.0 is 1. */);
    nfit_dcr->serial_number = cpu_to_le32(sn);
    nfit_dcr->fic = cpu_to_le16(0x201 /* Format Interface Code. See Chapter
                                         2: NVDIMM Device Specific Method
                                         (DSM) in DSM Spec Rev1. */);
}

static GArray *nvdimm_build_device_structure(GSList *device_list)
{
    GArray *structures = g_array_new(false, true /* clear */, 1);

    for (; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;

        /* build System Physical Address Range Structure. */
        nvdimm_build_structure_spa(structures, dev);

        /*
         * build Memory Device to System Physical Address Range Mapping
         * Structure.
         */
        nvdimm_build_structure_memdev(structures, dev);

        /* build NVDIMM Control Region Structure. */
        nvdimm_build_structure_dcr(structures, dev);
    }

    return structures;
}

static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
                              GArray *table_data, GArray *linker)
{
    GArray *structures = nvdimm_build_device_structure(device_list);
    unsigned int header;

    acpi_add_table(table_offsets, table_data);

    /* NFIT header. */
    header = table_data->len;
    acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
    /* NVDIMM device structures. */
    g_array_append_vals(table_data, structures->data, structures->len);

    build_header(linker, table_data,
                 (void *)(table_data->data + header), "NFIT",
                 sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL);
    g_array_free(structures, true);
}

struct NvdimmDsmIn {
    uint32_t handle;
    uint32_t revision;
    uint32_t function;
    /* the remaining size in the page is used by arg3. */
    union {
        uint8_t arg3[4084];
    };
} QEMU_PACKED;
typedef struct NvdimmDsmIn NvdimmDsmIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmIn) != 4096);

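/*
 * Layout sketch of the single shared DSM page (follows from the structure
 * definitions above and the AML field names defined in
 * nvdimm_build_ssdt() below):
 *   bytes 0-3     handle   (HDLE in AML)
 *   bytes 4-7     revision (REVS in AML)
 *   bytes 8-11    function (FUNC in AML)
 *   bytes 12-4095 arg3     (ARG3 in AML)
 * On output the same page is reused as NvdimmDsmOut: len, then data.
 */
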
struct NvdimmDsmOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint8_t data[4092];
} QEMU_PACKED;
typedef struct NvdimmDsmOut NvdimmDsmOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmOut) != 4096);

struct NvdimmDsmFunc0Out {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t supported_func;
} QEMU_PACKED;
typedef struct NvdimmDsmFunc0Out NvdimmDsmFunc0Out;

struct NvdimmDsmFuncNoPayloadOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status;
} QEMU_PACKED;
typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;

static uint64_t
nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
{
    nvdimm_debug("BUG: we never read _DSM IO Port.\n");
    return 0;
}

static void
nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    NvdimmDsmIn *in;
    hwaddr dsm_mem_addr = val;

    nvdimm_debug("dsm memory address %#" HWADDR_PRIx ".\n", dsm_mem_addr);

    /*
     * The DSM memory is mapped to guest address space so an evil guest
     * can change its content while we are doing DSM emulation. Avoid
     * this by copying DSM memory to QEMU local memory.
     */
    in = g_new(NvdimmDsmIn, 1);
    cpu_physical_memory_read(dsm_mem_addr, in, sizeof(*in));

    le32_to_cpus(&in->revision);
    le32_to_cpus(&in->function);
    le32_to_cpus(&in->handle);

    nvdimm_debug("Revision %#x Handler %#x Function %#x.\n", in->revision,
                 in->handle, in->function);

    /*
     * function 0 is called to inquire which functions are supported by
     * OSPM
     */
    if (in->function == 0) {
        NvdimmDsmFunc0Out func0 = {
            .len = cpu_to_le32(sizeof(func0)),
            /* No function supported other than function 0 */
            .supported_func = cpu_to_le32(0),
        };
        cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof(func0));
    } else {
        /* No function except function 0 is supported yet. */
        NvdimmDsmFuncNoPayloadOut out = {
            .len = cpu_to_le32(sizeof(out)),
            .func_ret_status = cpu_to_le32(1) /* Not Supported */,
        };
        cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
    }

    g_free(in);
}

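/*
 * To summarize the round trip handled above (a sketch of the control
 * flow, not additional functionality): the guest _DSM method fills the
 * shared page, writes the page's guest physical address to the NTFI IO
 * port, QEMU traps that write here, parses the input from its local
 * copy, and writes the result back into the same page for the AML to
 * return to the caller.
 */
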
static const MemoryRegionOps nvdimm_dsm_ops = {
    .read = nvdimm_dsm_read,
    .write = nvdimm_dsm_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
                            FWCfgState *fw_cfg, Object *owner)
{
    memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
                          "nvdimm-acpi-io", NVDIMM_ACPI_IO_LEN);
    memory_region_add_subregion(io, NVDIMM_ACPI_IO_BASE, &state->io_mr);

    state->dsm_mem = g_array_new(false, true /* clear */, 1);
    acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
    fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
                    state->dsm_mem->len);
}

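/*
 * Note (summarizing how the pieces connect, see nvdimm_build_ssdt()):
 * the zero-filled one-page fw_cfg file registered above is allocated by
 * the BIOS linker/loader at boot, and its address is patched into the
 * NVDIMM_ACPI_MEM_ADDR ("MEMA") named object, which the AML uses to
 * locate the shared DSM page.
 */
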
#define NVDIMM_COMMON_DSM      "NCAL"
#define NVDIMM_ACPI_MEM_ADDR   "MEMA"

static void nvdimm_build_common_dsm(Aml *dev)
{
    Aml *method, *ifctx, *function, *dsm_mem, *unpatched, *result_size;
    uint8_t byte_list[1];

    method = aml_method(NVDIMM_COMMON_DSM, 4, AML_SERIALIZED);
    function = aml_arg(2);
    dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR);

    /*
     * do not support any method if DSM memory address has not been
     * patched.
     */
    unpatched = aml_if(aml_equal(dsm_mem, aml_int(0x0)));

    /*
     * function 0 is called to inquire what functions are supported by
     * OSPM
     */
    ifctx = aml_if(aml_equal(function, aml_int(0)));
    byte_list[0] = 0 /* No function Supported */;
    aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
    aml_append(unpatched, ifctx);

    /* No function is supported yet. */
    byte_list[0] = 1 /* Not Supported */;
    aml_append(unpatched, aml_return(aml_buffer(1, byte_list)));
    aml_append(method, unpatched);

    /*
     * HDLE indicates from which device the DSM function is issued; it is
     * not used at this time as no function is supported yet. Currently
     * we make it always be 0 for all the devices and will set the
     * appropriate value once a real function is implemented.
     */
    aml_append(method, aml_store(aml_int(0x0), aml_name("HDLE")));
    aml_append(method, aml_store(aml_arg(1), aml_name("REVS")));
    aml_append(method, aml_store(aml_arg(2), aml_name("FUNC")));

    /*
     * tell QEMU about the real address of DSM memory, then QEMU
     * gets the control and fills the result in DSM memory.
     */
    aml_append(method, aml_store(dsm_mem, aml_name("NTFI")));

    result_size = aml_local(1);
    aml_append(method, aml_store(aml_name("RLEN"), result_size));
    /* RLEN is in bytes; CreateField below expects the size in bits. */
    aml_append(method, aml_store(aml_shiftleft(result_size, aml_int(3)),
                                 result_size));
    aml_append(method, aml_create_field(aml_name("ODAT"), aml_int(0),
                                        result_size, "OBUF"));
    aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name("OBUF"),
                                       aml_arg(6)));
    aml_append(method, aml_return(aml_arg(6)));
    aml_append(dev, method);
}

static void nvdimm_build_device_dsm(Aml *dev)
{
    Aml *method;

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_call4(NVDIMM_COMMON_DSM, aml_arg(0),
                                  aml_arg(1), aml_arg(2), aml_arg(3))));
    aml_append(dev, method);
}

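/*
 * The generated AML is roughly equivalent to this ASL (a sketch for
 * readability, not emitted verbatim):
 *
 *   Method (_DSM, 4, NotSerialized) {
 *       Return (NCAL (Arg0, Arg1, Arg2, Arg3))
 *   }
 */
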
static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
{
    for (; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;
        int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                           NULL);
        uint32_t handle = nvdimm_slot_to_handle(slot);
        Aml *nvdimm_dev;

        nvdimm_dev = aml_device("NV%02X", slot);

        /*
         * ACPI 6.0: 9.20 NVDIMM Devices:
         *
         * _ADR object that is used to supply OSPM with unique address
         * of the NVDIMM device. This is done by returning the NFIT Device
         * handle that is used to identify the associated entries in ACPI
         * table NFIT or _FIT.
         */
        aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));

        nvdimm_build_device_dsm(nvdimm_dev);
        aml_append(root_dev, nvdimm_dev);
    }
}

static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
                              GArray *table_data, GArray *linker)
{
    Aml *ssdt, *sb_scope, *dev, *field;
    int mem_addr_offset, nvdimm_ssdt;

    acpi_add_table(table_offsets, table_data);

    ssdt = init_aml_allocator();
    acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader));

    sb_scope = aml_scope("\\_SB");

    dev = aml_device("NVDR");

    /*
     * ACPI 6.0: 9.20 NVDIMM Devices:
     *
     * The ACPI Name Space device uses _HID of ACPI0012 to identify the root
     * NVDIMM interface device. Platform firmware is required to contain one
     * such device in _SB scope if NVDIMMs support is exposed by platform to
     * OSPM.
     * For each NVDIMM present or intended to be supported by platform,
     * platform firmware also exposes an ACPI Namespace Device under the
     * root device.
     */
    aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));

    /* map DSM memory and IO into ACPI namespace. */
    aml_append(dev, aml_operation_region("NPIO", AML_SYSTEM_IO,
               aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
    aml_append(dev, aml_operation_region("NRAM", AML_SYSTEM_MEMORY,
               aml_name(NVDIMM_ACPI_MEM_ADDR), sizeof(NvdimmDsmIn)));

    /*
     * DSM notifier:
     * NTFI: write the address of DSM memory and notify QEMU to emulate
     *       the access.
     *
     * It is an IO port so that accessing it will cause a VM-exit and the
     * control will be transferred to QEMU.
     */
    field = aml_field("NPIO", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
    aml_append(field, aml_named_field("NTFI",
               sizeof(uint32_t) * BITS_PER_BYTE));
    aml_append(dev, field);

    /*
     * DSM input:
     * HDLE: store device's handle, it's zero if the _DSM call happens
     *       on NVDIMM Root Device.
     * REVS: store the Arg1 of _DSM call.
     * FUNC: store the Arg2 of _DSM call.
     * ARG3: store the Arg3 of _DSM call.
     *
     * They are RAM mapping on host so that these accesses never cause
     * VM-EXIT.
     */
    field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
    aml_append(field, aml_named_field("HDLE",
               sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field("REVS",
               sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field("FUNC",
               sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field("ARG3",
               (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) *
                     BITS_PER_BYTE));
    aml_append(dev, field);

    /*
     * DSM output:
     * RLEN: the size of the buffer filled by QEMU.
     * ODAT: the buffer QEMU uses to store the result.
     *
     * Since the page is reused by both input and out, the input data
     * will be lost after storing new result into ODAT so we should fetch
     * all the input data before writing the result.
     */
    field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
    aml_append(field, aml_named_field("RLEN",
               sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field("ODAT",
               (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) *
                     BITS_PER_BYTE));
    aml_append(dev, field);

    nvdimm_build_common_dsm(dev);
    nvdimm_build_device_dsm(dev);

    nvdimm_build_nvdimm_devices(device_list, dev);

    aml_append(sb_scope, dev);
    aml_append(ssdt, sb_scope);

    nvdimm_ssdt = table_data->len;

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
    mem_addr_offset = build_append_named_dword(table_data,
                                               NVDIMM_ACPI_MEM_ADDR);

    bios_linker_loader_alloc(linker, NVDIMM_DSM_MEM_FILE, sizeof(NvdimmDsmIn),
                             false /* high memory */);
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   NVDIMM_DSM_MEM_FILE, table_data,
                                   table_data->data + mem_addr_offset,
                                   sizeof(uint32_t));
    build_header(linker, table_data,
                 (void *)(table_data->data + nvdimm_ssdt),
                 "SSDT", table_data->len - nvdimm_ssdt, 1, NULL, "NVDIMM");
    free_aml_allocator();
}

void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
                       GArray *linker)
{
    GSList *device_list = nvdimm_get_plugged_device_list();

    /* no NVDIMM device is plugged. */
    if (!device_list) {
        return;
    }
    nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
    nvdimm_build_ssdt(device_list, table_offsets, table_data, linker);
    g_slist_free(device_list);
}