/*
 * CXL Type 3 (memory expander) device
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/guest-random.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"
/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};
static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                          int dsmad_handle, MemoryRegion *mr,
                                          bool is_pmem, uint64_t dpa_base)
{
    CDATDsmas *dsmas;
    CDATDslbis *dslbis0;
    CDATDslbis *dslbis1;
    CDATDslbis *dslbis2;
    CDATDslbis *dslbis3;
    CDATDsemts *dsemts;

    dsmas = g_malloc(sizeof(*dsmas));
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
        .DPA_base = dpa_base,
        .DPA_length = memory_region_size(mr),
    };

    /* For now, no memory side cache, plausible-ish latency/bandwidth numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /*
         * NV: Reserved - the non-volatile flag from the DSMAS matters
         * V: EFI_MEMORY_SP
         */
        .EFI_memory_type_attr = is_pmem ? 2 : 1,
        .DPA_offset = 0,
        .DPA_length = memory_region_size(mr),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = (CDATSubHeader *)dsmas;
    cdat_table[CT3_CDAT_DSLBIS0] = (CDATSubHeader *)dslbis0;
    cdat_table[CT3_CDAT_DSLBIS1] = (CDATSubHeader *)dslbis1;
    cdat_table[CT3_CDAT_DSLBIS2] = (CDATSubHeader *)dslbis2;
    cdat_table[CT3_CDAT_DSLBIS3] = (CDATSubHeader *)dslbis3;
    cdat_table[CT3_CDAT_DSEMTS] = (CDATSubHeader *)dsemts;
}
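
/*
 * Build the CDAT table for the device: one CT3_CDAT_NUM_ENTRIES block of
 * entries per backing region, volatile first, matching the
 * volatile-then-persistent DPA layout used elsewhere in this file.
 * Returns the number of entries on success or a negative errno.
 */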
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    CXLType3Dev *ct3d = priv;
    MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
    int dsmad_handle = 0;
    int cur_ent = 0;
    int len = 0;

    if (!ct3d->hostpmem && !ct3d->hostvmem) {
        return 0;
    }

    if (ct3d->hostvmem) {
        volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!volatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    if (ct3d->hostpmem) {
        nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!nonvolatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    table = g_malloc0(len * sizeof(*table));

    /* Now fill them in */
    if (volatile_mr) {
        ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
                                      false, 0);
        cur_ent = CT3_CDAT_NUM_ENTRIES;
    }

    if (nonvolatile_mr) {
        uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0;

        ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
                                      nonvolatile_mr, true, base);
        cur_ent += CT3_CDAT_NUM_ENTRIES;
    }
    assert(len == cur_ent);

    *cdat_table = g_steal_pointer(&table);

    return len;
}
static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}
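
/*
 * DOE CDAT Table Access responder. The requester walks the table one entry
 * handle at a time: each response carries a single CDAT structure plus the
 * handle of the next entry, with CXL_DOE_TAB_ENT_MAX marking the end. A
 * hypothetical requester loop, for illustration only:
 *
 *     ent = 0;
 *     do {
 *         rsp = doe_table_access(ent);    // sketch, not a helper here
 *         consume(rsp.entry);
 *         ent = rsp.entry_handle;
 *     } while (ent != CXL_DOE_TAB_ENT_MAX);
 */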
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}
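
/*
 * Config space accessors: reads are offered to the DOE mailbox first and
 * fall back to the PCI defaults; writes are fanned out to the DOE, the
 * default config space, and AER, each of which has its own side effects
 * (e.g. RW1C status bits for AER).
 */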
static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}
static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}
/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EU, OUI and CID
 */
#define UI64_NULL ~(0ULL)
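
/*
 * A sketch of the range size low dword encoding built below, per the CXL 2.0
 * Device DVSEC layout (field names informal; double-check against the spec):
 *   bits [31:28]  low nibble of the memory size (hence the & 0xF0000000)
 *   bits [7:5]    memory class  -> (2 << 5)
 *   bits [4:2]    media type    -> (2 << 2)
 *   bits [1:0]    Memory Info Valid | Memory Active -> 0x3
 */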
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;
    uint32_t range1_size_hi, range1_size_lo,
             range1_base_hi = 0, range1_base_lo = 0,
             range2_size_hi = 0, range2_size_lo = 0,
             range2_base_hi = 0, range2_base_lo = 0;

    /*
     * Volatile memory is mapped at (0x0).
     * Persistent memory is mapped at (volatile->size).
     */
    if (ct3d->hostvmem) {
        range1_size_hi = ct3d->hostvmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostvmem->size & 0xF0000000);
        if (ct3d->hostpmem) {
            range2_size_hi = ct3d->hostpmem->size >> 32;
            range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                             (ct3d->hostpmem->size & 0xF0000000);
        }
    } else {
        range1_size_hi = ct3d->hostpmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostpmem->size & 0xF0000000);
    }

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = range1_size_hi,
        .range1_size_lo = range1_size_lo,
        .range1_base_hi = range1_base_hi,
        .range1_base_lo = range1_base_lo,
        .range2_size_hi = range2_size_hi,
        .range2_size_lo = range2_size_lo,
        .range2_base_hi = range2_base_hi,
        .range2_base_lo = range2_base_lo,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL31_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd         = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap                     = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl                    = 0x02, /* IO always enabled */
        .status                  = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* origin of this value is unclear */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);
}
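
/*
 * HDM decoder commit handshake: the guest programs a decoder's base/size and
 * sets COMMIT in its control register; the device model answers by clearing
 * ERR and setting (or clearing, on uncommit) COMMITTED. Guest-side flow,
 * roughly (illustrative pseudo-writes):
 *
 *     write DECODERn_BASE_LO/HI and DECODERn_SIZE_LO/HI;
 *     set   DECODERn_CTRL.COMMIT = 1;
 *     poll  DECODERn_CTRL until COMMITTED (or ERR) is set;
 */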
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
    /* TODO: Sanity checks that the decoder is possible */
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}
static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);

    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}
static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}
static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}
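
/*
 * MMIO writes to the component register block. Two cases need side effects
 * beyond storing the value: HDM decoder control writes trigger the
 * commit/uncommit handshake above, and the RAS status registers are RW1C
 * (write-1-to-clear), with the uncorrectable status additionally keeping
 * the first error pointer and header log in sync with the error list.
 */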
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 0;
        break;
    case A_CXL_HDM_DECODER1_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 1;
        break;
    case A_CXL_HDM_DECODER2_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 2;
        break;
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 3;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If the single bit written corresponds to the first error pointer
         * being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using the wrong flow for multiple header
                 * recording. Follow the behavior in PCIe r6.0, assuming
                 * multiple header support. Implementation-defined choice to
                 * clear all matching records if more than one bit is set,
                 * which corresponds most closely to the behavior of hardware
                 * not capable of multiple header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log.
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If no more errors, then follow recommendation of PCI spec
                 * r6.0 6.2.4.2 to set the first error pointer to a status
                 * bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        /* RW1C: clear only the status bits written as ones */
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);

        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);

        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    } else if (should_uncommit) {
        hdm_decoder_uncommit(ct3d, which_hdm);
    }
}
static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);

    if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
        error_setg(errp, "at least one memdev property must be set");
        return false;
    } else if (ct3d->hostmem && ct3d->hostpmem) {
        error_setg(errp, "[memdev] cannot be used with new "
                         "[persistent-memdev] property");
        return false;
    } else if (ct3d->hostmem) {
        /* Use of the hostmem property implies pmem */
        ct3d->hostpmem = ct3d->hostmem;
        ct3d->hostmem = NULL;
    }

    if (ct3d->hostpmem && !ct3d->lsa) {
        error_setg(errp, "lsa property must be set for persistent devices");
        return false;
    }

    if (ct3d->hostvmem) {
        MemoryRegion *vmr;
        char *v_name;

        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!vmr) {
            error_setg(errp, "volatile memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(vmr, false);
        memory_region_set_enabled(vmr, true);
        host_memory_backend_set_mapped(ct3d->hostvmem, true);
        if (ds->id) {
            v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
        } else {
            v_name = g_strdup("cxl-type3-dpa-vmem-space");
        }
        address_space_init(&ct3d->hostvmem_as, vmr, v_name);
        ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
        g_free(v_name);
    }

    if (ct3d->hostpmem) {
        MemoryRegion *pmr;
        char *p_name;

        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!pmr) {
            error_setg(errp, "persistent memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(pmr, true);
        memory_region_set_enabled(pmr, true);
        host_memory_backend_set_mapped(ct3d->hostpmem, true);
        if (ds->id) {
            p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
        } else {
            p_name = g_strdup("cxl-type3-dpa-pmem-space");
        }
        address_space_init(&ct3d->hostpmem_as, pmr, p_name);
        ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
        g_free(p_name);
    }

    return true;
}
static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};
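
/*
 * Realize lays out PCIe extended config space as used below (the offsets are
 * simply what this file passes to the init helpers):
 *   0x080  PCIe endpoint capability
 *   0x100  Device Serial Number (only when the "sn" property is set);
 *          DVSECs follow at 0x10c, or at 0x100 when there is no DSN
 *   0x190  DOE capability used for CDAT table access
 *   0x200  AER, leaving a bit of room before it for expansion
 */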
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 6;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate,
                                   &ct3d->cci);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);
    if (*errp) {
        goto err_free_special_ops;
    }

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }
    cxl_event_init(&ct3d->cxl_dstate, 2);

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
err_free_special_ops:
    g_free(regs->special_ops);
err_address_space_free:
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}
static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}
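
/*
 * Translate a host physical address (HPA) to a device physical address (DPA)
 * by walking the committed HDM decoders. Inside a decoder window, the
 * interleave maths below removes the interleave-way selector bits from the
 * HPA offset: the low (8 + ig) bits are kept, bits at and above (8 + ig + iw)
 * are shifted down by iw, and the bits in between pick the interleave target.
 * Worked example with illustrative numbers, ig = 0 and iw = 1 (2-way at a
 * 256-byte granule): hpa_offset 0x300 keeps 0x00 from the low mask, bit 8
 * selects the target, and 0x200 >> 1 contributes 0x100, so the DPA offset
 * within the decoder is 0x100.
 */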
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    unsigned int hdm_count;
    uint32_t cap;
    uint64_t dpa_base = 0;
    int i;

    cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY);
    hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap,
                                                 CXL_HDM_DECODER_CAPABILITY,
                                                 DECODER_COUNT));

    for (i = 0; i < hdm_count; i++) {
        uint64_t decoder_base, decoder_size, hpa_offset, skip;
        uint32_t hdm_ctrl, low, high;
        int ig, iw;

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc);
        decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc);
        decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO +
                       i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI +
                        i * hdm_inc);
        skip = ((uint64_t)high << 32) | (low & 0xf0000000);
        dpa_base += skip;

        hpa_offset = (uint64_t)host_addr - decoder_base;

        hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc);
        iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW);
        ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG);
        if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
            return false;
        }
        if (((uint64_t)host_addr < decoder_base) ||
            (hpa_offset >= decoder_size)) {
            int decoded_iw = cxl_interleave_ways_dec(iw, &error_fatal);

            if (decoded_iw == 0) {
                return false;
            }

            dpa_base += decoder_size / decoded_iw;
            continue;
        }

        *dpa = dpa_base +
            ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
             ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
              >> iw));

        return true;
    }
    return false;
}
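
/*
 * Map a decoded DPA onto the backing address spaces. The DPA map is volatile
 * first, persistent second: a DPA below the volatile region size goes to
 * hostvmem_as unchanged; anything above is shifted down by the volatile size
 * and lands in hostpmem_as.
 */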
static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
                                       hwaddr host_addr,
                                       unsigned int size,
                                       AddressSpace **as,
                                       uint64_t *dpa_offset)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return -ENODEV;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
        return -EINVAL;
    }

    if (*dpa_offset > ct3d->cxl_dstate.mem_size) {
        return -EINVAL;
    }

    if (vmr) {
        if (*dpa_offset < memory_region_size(vmr)) {
            *as = &ct3d->hostvmem_as;
        } else {
            *as = &ct3d->hostpmem_as;
            *dpa_offset -= memory_region_size(vmr);
        }
    } else {
        *as = &ct3d->hostpmem_as;
    }

    return 0;
}
cxl_type3_read(PCIDevice
*d
, hwaddr host_addr
, uint64_t *data
,
860 unsigned size
, MemTxAttrs attrs
)
862 CXLType3Dev
*ct3d
= CXL_TYPE3(d
);
863 uint64_t dpa_offset
= 0;
864 AddressSpace
*as
= NULL
;
867 res
= cxl_type3_hpa_to_as_and_dpa(ct3d
, host_addr
, size
,
873 if (sanitize_running(&ct3d
->cci
)) {
874 qemu_guest_getrandom_nofail(data
, size
);
878 return address_space_read(as
, dpa_offset
, attrs
, data
, size
);
MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    if (sanitize_running(&ct3d->cci)) {
        return MEMTX_OK;
    }

    return address_space_write(as, dpa_offset, attrs, &data, size);
}
static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_t3(ct3d);

    /*
     * Bring up an endpoint to target with MCTP over VDM.
     * This device is emulating an MLD with a single LD for now.
     */
    cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
                                          DEVICE(ct3d), DEVICE(ct3d),
                                          512); /* Max payload made up */
    cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d),
                             512); /* Max payload made up */
}
static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *), /* for backward compatibility */
    DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};
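
/*
 * Typical usage of the properties above (illustrative command line; ids,
 * bus names and sizes are placeholders, and the machine also needs a CXL
 * fixed memory window configured):
 *
 *   -object memory-backend-ram,id=vmem0,size=256M
 *   -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,id=cxl-vmem0
 *
 * A persistent device instead takes persistent-memdev= plus an lsa= backend
 * for its label storage area.
 */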
static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}
static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}
static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}
static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * As with the PMEM, if the guest is not allowed to exit gracefully, label
     * updates will be lost.
     */
}
static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;
    AddressSpace *as;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return false;
    }

    if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
        return false;
    }

    if (vmr) {
        if (dpa_offset < memory_region_size(vmr)) {
            as = &ct3d->hostvmem_as;
        } else {
            as = &ct3d->hostpmem_as;
            dpa_offset -= memory_region_size(vmr);
        }
    } else {
        as = &ct3d->hostpmem_as;
    }

    /* Write the cacheline payload itself, not the address of the pointer */
    address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
                        CXL_CACHE_LINE_SIZE);
    return true;
}
void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
{
    ct3d->poison_list_overflowed = true;
    ct3d->poison_list_overflow_ts =
        cxl_device_get_timestamp(&ct3d->cxl_dstate);
}
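
/*
 * Example QMP invocation of the poison-injection command below (the path is
 * a placeholder for the QOM path of a cxl-type3 device):
 *
 *   { "execute": "cxl-inject-poison",
 *     "arguments": { "path": "/machine/peripheral/cxl-vmem0",
 *                    "start": 2048, "length": 64 } }
 */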
void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
                           Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    CXLPoison *p;

    if (length % 64) {
        error_setg(errp, "Poison injection must be in multiples of 64 bytes");
        return;
    }
    if (start % 64) {
        error_setg(errp, "Poison start address must be 64 byte aligned");
        return;
    }
    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    ct3d = CXL_TYPE3(obj);

    QLIST_FOREACH(p, &ct3d->poison_list, node) {
        if (((start >= p->start) && (start < p->start + p->length)) ||
            ((start + length > p->start) &&
             (start + length <= p->start + p->length))) {
            error_setg(errp,
                       "Overlap with existing poisoned region not supported");
            return;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        cxl_set_poison_list_overflowed(ct3d);
        return;
    }

    p = g_new0(CXLPoison, 1);
    p->length = length;
    p->start = start;
    /* Different from injected via the mbox */
    p->type = CXL_POISON_TYPE_INTERNAL;

    QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
    ct3d->poison_list_cnt++;
}
/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        if (header) {
            /* More than 32 DWORDs were supplied */
            error_setg(errp, "Header must be 32 DWORD or less");
            g_free(cxl_err);
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) &
          ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                                    const QemuUUID *uuid, uint32_t flags,
                                    uint8_t length, uint64_t timestamp)
{
    st24_le_p(&hdr->flags, flags);
    hdr->length = length;
    memcpy(&hdr->id, uuid, sizeof(hdr->id));
    stq_le_p(&hdr->timestamp, timestamp);
}
static const QemuUUID gen_media_uuid = {
    .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
};

static const QemuUUID dram_uuid = {
    .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
                 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
};

static const QemuUUID memory_module_uuid = {
    .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
                 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
};
#define CXL_GMER_VALID_CHANNEL              BIT(0)
#define CXL_GMER_VALID_RANK                 BIT(1)
#define CXL_GMER_VALID_DEVICE               BIT(2)
#define CXL_GMER_VALID_COMPONENT            BIT(3)
static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
{
    switch (log) {
    case CXL_EVENT_LOG_INFORMATIONAL:
        return CXL_EVENT_TYPE_INFO;
    case CXL_EVENT_LOG_WARNING:
        return CXL_EVENT_TYPE_WARN;
    case CXL_EVENT_LOG_FAILURE:
        return CXL_EVENT_TYPE_FAIL;
    case CXL_EVENT_LOG_FATAL:
        return CXL_EVENT_TYPE_FATAL;
    /* DCD not yet supported */
    default:
        return -EINVAL;
    }
}
/* Component ID is device specific. Define this as a string. */
void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint64_t dpa,
                                        uint8_t descriptor, uint8_t type,
                                        uint8_t transaction_type,
                                        bool has_channel, uint8_t channel,
                                        bool has_rank, uint8_t rank,
                                        bool has_device, uint32_t device,
                                        const char *component_id,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventGenMedia gem;
    CXLEventRecordHdr *hdr = &gem.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&gem, 0, sizeof(gem));
    cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    stq_le_p(&gem.phys_addr, dpa);
    gem.descriptor = descriptor;
    gem.type = type;
    gem.transaction_type = transaction_type;

    if (has_channel) {
        gem.channel = channel;
        valid_flags |= CXL_GMER_VALID_CHANNEL;
    }

    if (has_rank) {
        gem.rank = rank;
        valid_flags |= CXL_GMER_VALID_RANK;
    }

    if (has_device) {
        st24_le_p(gem.device, device);
        valid_flags |= CXL_GMER_VALID_DEVICE;
    }

    if (component_id) {
        strncpy((char *)gem.component_id, component_id,
                sizeof(gem.component_id) - 1);
        valid_flags |= CXL_GMER_VALID_COMPONENT;
    }

    stw_le_p(&gem.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
        cxl_event_irq_assert(ct3d);
    }
}
#define CXL_DRAM_VALID_CHANNEL              BIT(0)
#define CXL_DRAM_VALID_RANK                 BIT(1)
#define CXL_DRAM_VALID_NIBBLE_MASK          BIT(2)
#define CXL_DRAM_VALID_BANK_GROUP           BIT(3)
#define CXL_DRAM_VALID_BANK                 BIT(4)
#define CXL_DRAM_VALID_ROW                  BIT(5)
#define CXL_DRAM_VALID_COLUMN               BIT(6)
#define CXL_DRAM_VALID_CORRECTION_MASK      BIT(7)
void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
                               uint64_t dpa, uint8_t descriptor,
                               uint8_t type, uint8_t transaction_type,
                               bool has_channel, uint8_t channel,
                               bool has_rank, uint8_t rank,
                               bool has_nibble_mask, uint32_t nibble_mask,
                               bool has_bank_group, uint8_t bank_group,
                               bool has_bank, uint8_t bank,
                               bool has_row, uint32_t row,
                               bool has_column, uint16_t column,
                               bool has_correction_mask,
                               uint64List *correction_mask,
                               Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventDram dram;
    CXLEventRecordHdr *hdr = &dram.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&dram, 0, sizeof(dram));
    cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    stq_le_p(&dram.phys_addr, dpa);
    dram.descriptor = descriptor;
    dram.type = type;
    dram.transaction_type = transaction_type;

    if (has_channel) {
        dram.channel = channel;
        valid_flags |= CXL_DRAM_VALID_CHANNEL;
    }

    if (has_rank) {
        dram.rank = rank;
        valid_flags |= CXL_DRAM_VALID_RANK;
    }

    if (has_nibble_mask) {
        st24_le_p(dram.nibble_mask, nibble_mask);
        valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
    }

    if (has_bank_group) {
        dram.bank_group = bank_group;
        valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
    }

    if (has_bank) {
        dram.bank = bank;
        valid_flags |= CXL_DRAM_VALID_BANK;
    }

    if (has_row) {
        st24_le_p(dram.row, row);
        valid_flags |= CXL_DRAM_VALID_ROW;
    }

    if (has_column) {
        stw_le_p(&dram.column, column);
        valid_flags |= CXL_DRAM_VALID_COLUMN;
    }

    if (has_correction_mask) {
        int count = 0;

        while (correction_mask && count < 4) {
            stq_le_p(&dram.correction_mask[count],
                     correction_mask->value);
            count++;
            correction_mask = correction_mask->next;
        }
        valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
    }

    stw_le_p(&dram.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
        cxl_event_irq_assert(ct3d);
    }
}
void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint8_t type,
                                        uint8_t health_status,
                                        uint8_t media_status,
                                        uint8_t additional_status,
                                        uint8_t life_used,
                                        int16_t temperature,
                                        uint32_t dirty_shutdown_count,
                                        uint32_t corrected_volatile_error_count,
                                        uint32_t corrected_persist_error_count,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventMemoryModule module;
    CXLEventRecordHdr *hdr = &module.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&module, 0, sizeof(module));
    cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    module.type = type;
    module.health_status = health_status;
    module.media_status = media_status;
    module.additional_status = additional_status;
    module.life_used = life_used;
    stw_le_p(&module.temperature, temperature);
    stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
    stl_le_p(&module.corrected_volatile_error_count,
             corrected_volatile_error_count);
    stl_le_p(&module.corrected_persistent_error_count,
             corrected_persist_error_count);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
        cxl_event_irq_assert(ct3d);
    }
}
static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL Memory Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
    cvc->set_cacheline = set_cacheline;
}
static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};
static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);