/*
 * CXL Type 3 (memory expander) device
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-v2-only
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/guest-random.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"
/* Default CDAT entries for a memory region */
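/*
 * Slot indices for the per-region CDAT entries; the ordering matches the
 * cdat_table[] assignments made in ct3_build_cdat_entries_for_mr() below.
 */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};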
static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                          int dsmad_handle, MemoryRegion *mr,
                                          bool is_pmem, uint64_t dpa_base)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
        .DPA_base = dpa_base,
        .DPA_length = memory_region_size(mr),
    };

    /* For now, no memory side cache, plausible-ish numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
    };

    dsemts = g_malloc(sizeof(*dsemts));
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /* NV: Reserved - whether the range is non-volatile comes from the DSMAS */
        .EFI_memory_type_attr = is_pmem ? 2 : 1,
        .DPA_length = memory_region_size(mr),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);
}
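
/*
 * Build the CDAT table for this device: one block of CT3_CDAT_NUM_ENTRIES
 * entries for the volatile region (if present) followed by one block for the
 * persistent region (if present).
 */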
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    CXLType3Dev *ct3d = priv;
    MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
    int dsmad_handle = 0;
    int cur_ent = 0;
    int len = 0;

    if (!ct3d->hostpmem && !ct3d->hostvmem) {
        return 0;
    }

    if (ct3d->hostvmem) {
        volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!volatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    if (ct3d->hostpmem) {
        nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!nonvolatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    table = g_malloc0(len * sizeof(*table));

    /* Now fill them in */
    if (volatile_mr) {
        ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
                                      false, 0);
        cur_ent = CT3_CDAT_NUM_ENTRIES;
    }

    if (nonvolatile_mr) {
        uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0;

        ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
                                      nonvolatile_mr, true, base);
        cur_ent += CT3_CDAT_NUM_ENTRIES;
    }
    assert(len == cur_ent);

    *cdat_table = g_steal_pointer(&table);

    return len;
}
static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}
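
/*
 * DOE CDAT Table Access protocol handler: return the CDAT entry named by the
 * request's entry handle, chaining to the next handle (or the end-of-table
 * marker) in the response header.
 */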
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}
static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}
static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}
/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * locally assigned EUI-64s.
 */
#define UI64_NULL ~(0ULL)
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;
    uint32_t range1_size_hi, range1_size_lo,
             range1_base_hi = 0, range1_base_lo = 0,
             range2_size_hi = 0, range2_size_lo = 0,
             range2_base_hi = 0, range2_base_lo = 0;

    /*
     * Volatile memory is mapped at DPA 0x0.
     * Persistent memory is mapped directly after it, at DPA (volatile->size).
     */
    if (ct3d->hostvmem) {
        range1_size_hi = ct3d->hostvmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostvmem->size & 0xF0000000);
        if (ct3d->hostpmem) {
            range2_size_hi = ct3d->hostpmem->size >> 32;
            range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                             (ct3d->hostpmem->size & 0xF0000000);
        }
    } else {
        range1_size_hi = ct3d->hostpmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostpmem->size & 0xF0000000);
    }

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .range1_size_hi = range1_size_hi,
        .range1_size_lo = range1_size_lo,
        .range1_base_hi = range1_base_hi,
        .range1_base_lo = range1_base_lo,
        .range2_size_hi = range2_size_hi,
        .range2_size_lo = range2_size_lo,
        .range2_base_hi = range2_base_hi,
        .range2_base_lo = range2_base_lo,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL31_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl = 0x02, /* IO always enabled */
        .status = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* value of unclear provenance */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);
}
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
    /* TODO: Sanity checks that the decoder is possible */
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}
static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);

    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}
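
/*
 * Map the QMP error-injection enums onto the bit positions used in the CXL
 * RAS uncorrectable and correctable error status registers.
 */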
static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}
static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}
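
/*
 * Component register write handler. HDM decoder control writes trigger a
 * (de)commit of the addressed decoder; the RAS status registers get RW1C
 * semantics, including maintenance of the first-error pointer and header log.
 */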
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;
    int which_hdm = -1;

    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 0;
        break;
    case A_CXL_HDM_DECODER1_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 1;
        break;
    case A_CXL_HDM_DECODER2_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 2;
        break;
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 3;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err = 0;

        /*
         * If a single bit is written that corresponds to the first error
         * pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using the wrong flow for multiple header
                 * recording. Following behavior in PCIe r6.0 and assuming
                 * multiple header support. Implementation defined choice to
                 * clear all matching records if more than one bit is set -
                 * which corresponds closest to the behavior of hardware not
                 * capable of multiple header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log.
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If no more errors, then follow recommendation of PCI spec
                 * r6.0 6.2.4.2 to set the first error pointer to a status
                 * bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);
        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);

        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    } else if (should_uncommit) {
        hdm_decoder_uncommit(ct3d, which_hdm);
    }
}
static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);

    if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
        error_setg(errp, "at least one memdev property must be set");
        return false;
    } else if (ct3d->hostmem && ct3d->hostpmem) {
        error_setg(errp, "[memdev] cannot be used with new "
                         "[persistent-memdev] property");
        return false;
    } else if (ct3d->hostmem) {
        /* Use of hostmem property implies pmem */
        ct3d->hostpmem = ct3d->hostmem;
        ct3d->hostmem = NULL;
    }

    if (ct3d->hostpmem && !ct3d->lsa) {
        error_setg(errp, "lsa property must be set for persistent devices");
        return false;
    }

    if (ct3d->hostvmem) {
        MemoryRegion *vmr;
        char *v_name;

        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!vmr) {
            error_setg(errp, "volatile memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(vmr, false);
        memory_region_set_enabled(vmr, true);
        host_memory_backend_set_mapped(ct3d->hostvmem, true);
        if (ds->id) {
            v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
        } else {
            v_name = g_strdup("cxl-type3-dpa-vmem-space");
        }
        address_space_init(&ct3d->hostvmem_as, vmr, v_name);
        ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
        g_free(v_name);
    }

    if (ct3d->hostpmem) {
        MemoryRegion *pmr;
        char *p_name;

        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!pmr) {
            error_setg(errp, "persistent memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(pmr, true);
        memory_region_set_enabled(pmr, true);
        host_memory_backend_set_mapped(ct3d->hostpmem, true);
        if (ds->id) {
            p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
        } else {
            p_name = g_strdup("cxl-type3-dpa-pmem-space");
        }
        address_space_init(&ct3d->hostpmem_as, pmr, p_name);
        ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
        g_free(p_name);
    }

    return true;
}
static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 6;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate,
                                   &ct3d->cci);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);
    if (*errp) {
        goto err_free_special_ops;
    }

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }
    cxl_event_init(&ct3d->cxl_dstate, 2);

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
err_free_special_ops:
    g_free(regs->special_ops);
err_address_space_free:
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}
static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}
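
/*
 * Walk the committed HDM decoders to translate a host physical address into
 * a device physical address, accounting for interleave ways/granularity and
 * the DPA skip programmed into each decoder.
 */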
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    unsigned int hdm_count;
    uint32_t cap;
    uint64_t dpa_base = 0;
    int i;

    cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY);
    hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap,
                                                 CXL_HDM_DECODER_CAPABILITY,
                                                 DECODER_COUNT));

    for (i = 0; i < hdm_count; i++) {
        uint64_t decoder_base, decoder_size, hpa_offset, skip;
        uint32_t hdm_ctrl, low, high;
        int ig, iw;

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc);
        decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc);
        decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO +
                       i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI +
                        i * hdm_inc);
        skip = ((uint64_t)high << 32) | (low & 0xf0000000);
        dpa_base += skip;

        hpa_offset = (uint64_t)host_addr - decoder_base;

        hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc);
        iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW);
        ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG);
        if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
            return false;
        }
        if (((uint64_t)host_addr < decoder_base) ||
            (hpa_offset >= decoder_size)) {
            int decoded_iw = cxl_interleave_ways_dec(iw, &error_fatal);

            if (decoded_iw == 0) {
                return false;
            }

            dpa_base += decoder_size / decoded_iw;
            continue;
        }

        *dpa = dpa_base +
            ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
             ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
              >> iw));

        return true;
    }
    return false;
}
static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
                                       hwaddr host_addr,
                                       unsigned int size,
                                       AddressSpace **as,
                                       uint64_t *dpa_offset)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return -ENODEV;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
        return -EINVAL;
    }

    if (*dpa_offset > ct3d->cxl_dstate.mem_size) {
        return -EINVAL;
    }

    if (vmr) {
        if (*dpa_offset < memory_region_size(vmr)) {
            *as = &ct3d->hostvmem_as;
        } else {
            *as = &ct3d->hostpmem_as;
            *dpa_offset -= memory_region_size(vmr);
        }
    } else {
        *as = &ct3d->hostpmem_as;
    }

    return 0;
}
MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    if (sanitize_running(&ct3d->cci)) {
        qemu_guest_getrandom_nofail(data, size);
        return MEMTX_OK;
    }

    return address_space_read(as, dpa_offset, attrs, data, size);
}
MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    if (sanitize_running(&ct3d->cci)) {
        return MEMTX_OK;
    }

    return address_space_write(as, dpa_offset, attrs, &data, size);
}
static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_t3(ct3d);

    /*
     * Bring up an endpoint to target with MCTP over VDM.
     * This device is emulating an MLD with a single LD for now.
     */
    cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
                                          DEVICE(ct3d), DEVICE(ct3d),
                                          512); /* Max payload made up */
    cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d),
                             512); /* Max payload made up */
}
static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *), /* for backward compatibility */
    DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};
static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}
static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}
static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}
static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * Just like the PMEM contents, label updates will be lost if the guest
     * is not allowed to exit gracefully.
     */
}
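
/*
 * Write one cache line worth of data directly into the backing store at the
 * given DPA, picking the volatile or persistent address space as appropriate.
 */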
static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;
    AddressSpace *as;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return false;
    }

    if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
        return false;
    }

    if (vmr) {
        if (dpa_offset < memory_region_size(vmr)) {
            as = &ct3d->hostvmem_as;
        } else {
            as = &ct3d->hostpmem_as;
            dpa_offset -= memory_region_size(vmr);
        }
    } else {
        as = &ct3d->hostpmem_as;
    }

    address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
                        CXL_CACHE_LINE_SIZE);
    return true;
}
void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
{
    ct3d->poison_list_overflowed = true;
    ct3d->poison_list_overflow_ts =
        cxl_device_get_timestamp(&ct3d->cxl_dstate);
}
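
/*
 * QMP poison injection: record a poisoned DPA range on the device's poison
 * list, rejecting misaligned or overlapping ranges and flagging overflow once
 * CXL_POISON_LIST_LIMIT entries are reached.
 */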
void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
                           Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    CXLPoison *p;

    if (length % 64) {
        error_setg(errp, "Poison injection must be in multiples of 64 bytes");
        return;
    }
    if (start % 64) {
        error_setg(errp, "Poison start address must be 64 byte aligned");
        return;
    }
    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    ct3d = CXL_TYPE3(obj);

    QLIST_FOREACH(p, &ct3d->poison_list, node) {
        if (((start >= p->start) && (start < p->start + p->length)) ||
            ((start + length > p->start) &&
             (start + length <= p->start + p->length))) {
            error_setg(errp,
                       "Overlap with existing poisoned region not supported");
            return;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        cxl_set_poison_list_overflowed(ct3d);
        return;
    }

    p = g_new0(CXLPoison, 1);
    p->length = length;
    p->start = start;
    /* Different from poison injected via the mbox */
    p->type = CXL_POISON_TYPE_INTERNAL;

    QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
    ct3d->poison_list_cnt++;
}
/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err = 0;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;

    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        if (header_count > 32) {
            error_setg(errp, "Header must be 32 DWORD or less");
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) &
          ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                                    const QemuUUID *uuid, uint32_t flags,
                                    uint8_t length, uint64_t timestamp)
{
    st24_le_p(&hdr->flags, flags);
    hdr->length = length;
    memcpy(&hdr->id, uuid, sizeof(hdr->id));
    stq_le_p(&hdr->timestamp, timestamp);
}
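
/*
 * Identity UUIDs for the General Media, DRAM and Memory Module event record
 * formats used by the injection helpers below.
 */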
static const QemuUUID gen_media_uuid = {
    .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
};

static const QemuUUID dram_uuid = {
    .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
                 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
};

static const QemuUUID memory_module_uuid = {
    .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
                 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
};
#define CXL_GMER_VALID_CHANNEL                          BIT(0)
#define CXL_GMER_VALID_RANK                             BIT(1)
#define CXL_GMER_VALID_DEVICE                           BIT(2)
#define CXL_GMER_VALID_COMPONENT                        BIT(3)
static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
{
    switch (log) {
    case CXL_EVENT_LOG_INFORMATIONAL:
        return CXL_EVENT_TYPE_INFO;
    case CXL_EVENT_LOG_WARNING:
        return CXL_EVENT_TYPE_WARN;
    case CXL_EVENT_LOG_FAILURE:
        return CXL_EVENT_TYPE_FAIL;
    case CXL_EVENT_LOG_FATAL:
        return CXL_EVENT_TYPE_FATAL;
    /* DCD not yet supported */
    default:
        return -EINVAL;
    }
}
/* Component ID is device specific. Define this as a string. */
void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint64_t dpa,
                                        uint8_t descriptor, uint8_t type,
                                        uint8_t transaction_type,
                                        bool has_channel, uint8_t channel,
                                        bool has_rank, uint8_t rank,
                                        bool has_device, uint32_t device,
                                        const char *component_id,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventGenMedia gem;
    CXLEventRecordHdr *hdr = &gem.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&gem, 0, sizeof(gem));
    cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    stq_le_p(&gem.phys_addr, dpa);
    gem.descriptor = descriptor;
    gem.type = type;
    gem.transaction_type = transaction_type;

    if (has_channel) {
        gem.channel = channel;
        valid_flags |= CXL_GMER_VALID_CHANNEL;
    }

    if (has_rank) {
        gem.rank = rank;
        valid_flags |= CXL_GMER_VALID_RANK;
    }

    if (has_device) {
        st24_le_p(gem.device, device);
        valid_flags |= CXL_GMER_VALID_DEVICE;
    }

    if (component_id) {
        strncpy((char *)gem.component_id, component_id,
                sizeof(gem.component_id) - 1);
        valid_flags |= CXL_GMER_VALID_COMPONENT;
    }

    stw_le_p(&gem.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
        cxl_event_irq_assert(ct3d);
    }
}
#define CXL_DRAM_VALID_CHANNEL                          BIT(0)
#define CXL_DRAM_VALID_RANK                             BIT(1)
#define CXL_DRAM_VALID_NIBBLE_MASK                      BIT(2)
#define CXL_DRAM_VALID_BANK_GROUP                       BIT(3)
#define CXL_DRAM_VALID_BANK                             BIT(4)
#define CXL_DRAM_VALID_ROW                              BIT(5)
#define CXL_DRAM_VALID_COLUMN                           BIT(6)
#define CXL_DRAM_VALID_CORRECTION_MASK                  BIT(7)
void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
                               uint64_t dpa, uint8_t descriptor,
                               uint8_t type, uint8_t transaction_type,
                               bool has_channel, uint8_t channel,
                               bool has_rank, uint8_t rank,
                               bool has_nibble_mask, uint32_t nibble_mask,
                               bool has_bank_group, uint8_t bank_group,
                               bool has_bank, uint8_t bank,
                               bool has_row, uint32_t row,
                               bool has_column, uint16_t column,
                               bool has_correction_mask,
                               uint64List *correction_mask,
                               Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventDram dram;
    CXLEventRecordHdr *hdr = &dram.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&dram, 0, sizeof(dram));
    cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    stq_le_p(&dram.phys_addr, dpa);
    dram.descriptor = descriptor;
    dram.type = type;
    dram.transaction_type = transaction_type;

    if (has_channel) {
        dram.channel = channel;
        valid_flags |= CXL_DRAM_VALID_CHANNEL;
    }

    if (has_rank) {
        dram.rank = rank;
        valid_flags |= CXL_DRAM_VALID_RANK;
    }

    if (has_nibble_mask) {
        st24_le_p(dram.nibble_mask, nibble_mask);
        valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
    }

    if (has_bank_group) {
        dram.bank_group = bank_group;
        valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
    }

    if (has_bank) {
        dram.bank = bank;
        valid_flags |= CXL_DRAM_VALID_BANK;
    }

    if (has_row) {
        st24_le_p(dram.row, row);
        valid_flags |= CXL_DRAM_VALID_ROW;
    }

    if (has_column) {
        stw_le_p(&dram.column, column);
        valid_flags |= CXL_DRAM_VALID_COLUMN;
    }

    if (has_correction_mask) {
        int count = 0;

        while (correction_mask && count < 4) {
            stq_le_p(&dram.correction_mask[count],
                     correction_mask->value);
            count++;
            correction_mask = correction_mask->next;
        }
        valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
    }

    stw_le_p(&dram.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
        cxl_event_irq_assert(ct3d);
    }
}
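
/*
 * Memory Module event records carry device health information (status,
 * temperature, error counts) rather than a DPA, so no validity flags are
 * needed here.
 */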
void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint8_t type,
                                        uint8_t health_status,
                                        uint8_t media_status,
                                        uint8_t additional_status,
                                        uint8_t life_used,
                                        int16_t temperature,
                                        uint32_t dirty_shutdown_count,
                                        uint32_t corrected_volatile_error_count,
                                        uint32_t corrected_persist_error_count,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventMemoryModule module;
    CXLEventRecordHdr *hdr = &module.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&module, 0, sizeof(module));
    cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    module.type = type;
    module.health_status = health_status;
    module.media_status = media_status;
    module.additional_status = additional_status;
    module.life_used = life_used;
    stw_le_p(&module.temperature, temperature);
    stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
    stl_le_p(&module.corrected_volatile_error_count,
             corrected_volatile_error_count);
    stl_le_p(&module.corrected_persistent_error_count,
             corrected_persist_error_count);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
        cxl_event_irq_assert(ct3d);
    }
}
static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL Memory Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
    cvc->set_cacheline = set_cacheline;
}
static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};
static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);