/*
 * CXL Type 3 (memory expander) device
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-v2-only
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "qemu/guest-random.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

#define DWORD_BYTE 4
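
/*
 * DOE data object lengths are expressed in 32-bit dwords rather than bytes,
 * hence the conversions below via DIV_ROUND_UP(x, DWORD_BYTE).
 */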

/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};
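
/*
 * Each backing memory region gets one block of CDAT entries: a DSMAS
 * describing the DPA range, four DSLBIS records (read/write latency and
 * read/write bandwidth, all referencing that DSMAS handle) and a DSEMTS
 * giving the EFI memory type. ct3_build_cdat_entries_for_mr() fills in
 * one such block.
 */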

static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                          int dsmad_handle, MemoryRegion *mr,
                                          bool is_pmem, uint64_t dpa_base)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
        .DPA_base = dpa_base,
        .DPA_length = memory_region_size(mr),
    };

    /* For now, no memory side cache, plausiblish numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /*
         * NV: Reserved - the non volatile from DSMAS matters
         * V: EFI_MEMORY_SP
         */
        .EFI_memory_type_attr = is_pmem ? 2 : 1,
        .DPA_offset = 0,
        .DPA_length = memory_region_size(mr),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);
}

static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    CXLType3Dev *ct3d = priv;
    MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
    int dsmad_handle = 0;
    int cur_ent = 0;
    int len = 0;

    if (!ct3d->hostpmem && !ct3d->hostvmem) {
        return 0;
    }

    if (ct3d->hostvmem) {
        volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!volatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    if (ct3d->hostpmem) {
        nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!nonvolatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    table = g_malloc0(len * sizeof(*table));

    /* Now fill them in */
    if (volatile_mr) {
        ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
                                      false, 0);
        cur_ent = CT3_CDAT_NUM_ENTRIES;
    }

    if (nonvolatile_mr) {
        uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0;
        ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
                                      nonvolatile_mr, true, base);
        cur_ent += CT3_CDAT_NUM_ENTRIES;
    }
    assert(len == cur_ent);

    *cdat_table = g_steal_pointer(&table);

    return len;
}

static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}

static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}
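
/*
 * Typical consumption pattern: the host requests entry_handle 0, then keeps
 * requesting the entry_handle returned in each response until the response
 * carries CXL_DOE_TAB_ENT_MAX, which marks the last CDAT entry.
 */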

static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}

static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EU, OUI and CID
 */
#define UI64_NULL ~(0ULL)

static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;
    uint32_t range1_size_hi, range1_size_lo,
             range1_base_hi = 0, range1_base_lo = 0,
             range2_size_hi = 0, range2_size_lo = 0,
             range2_base_hi = 0, range2_base_lo = 0;

    /*
     * Volatile memory is mapped at (0x0)
     * Persistent memory is mapped at (volatile->size)
     */
    if (ct3d->hostvmem) {
        range1_size_hi = ct3d->hostvmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostvmem->size & 0xF0000000);
        if (ct3d->hostpmem) {
            range2_size_hi = ct3d->hostpmem->size >> 32;
            range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                             (ct3d->hostpmem->size & 0xF0000000);
        }
    } else {
        range1_size_hi = ct3d->hostpmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostpmem->size & 0xF0000000);
    }
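
    /*
     * Note (assumption based on the CXL Device DVSEC range register layout):
     * the low dword packs flag/attribute fields into bits 27:0 alongside
     * bits 31:28 of the size, which is why only (size & 0xF0000000) survives
     * above -- range sizes are representable in 256 MiB multiples.
     */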

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = range1_size_hi,
        .range1_size_lo = range1_size_lo,
        .range1_base_hi = range1_base_hi,
        .range1_base_lo = range1_base_lo,
        .range2_size_hi = range2_size_hi,
        .range2_size_lo = range2_size_lo,
        .range2_base_hi = range2_base_hi,
        .range2_base_lo = range2_base_lo,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL31_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl = 0x02, /* IO always enabled */
        .status = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);
}

static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
    /* TODO: Sanity checks that the decoder is possible */
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}

static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);

    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}
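
/*
 * Commit/uncommit are driven from ct3d_reg_write() below: a write to an HDM
 * decoder control register with the COMMIT bit set latches the decoder
 * (COMMITTED = 1, ERR = 0); a write with COMMIT clear releases it.
 */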

static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}

static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}

static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 0;
        break;
    case A_CXL_HDM_DECODER1_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 1;
        break;
    case A_CXL_HDM_DECODER2_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 2;
        break;
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 3;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If a single bit is written that corresponds to the first error
         * pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using the wrong flow for multiple header
                 * recording. Following behavior in PCIe r6.0 and assuming
                 * multiple header support. Implementation defined choice to
                 * clear all matching records if more than one bit is set -
                 * which corresponds closest to the behavior of hardware not
                 * capable of multiple header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If no more errors, then follow recommendation of PCI spec
                 * r6.0 6.2.4.2 to set the first error pointer to a status
                 * bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    } else if (should_uncommit) {
        hdm_decoder_uncommit(ct3d, which_hdm);
    }
}

static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);

    if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
        error_setg(errp, "at least one memdev property must be set");
        return false;
    } else if (ct3d->hostmem && ct3d->hostpmem) {
        error_setg(errp, "[memdev] cannot be used with new "
                         "[persistent-memdev] property");
        return false;
    } else if (ct3d->hostmem) {
        /* Use of hostmem property implies pmem */
        ct3d->hostpmem = ct3d->hostmem;
        ct3d->hostmem = NULL;
    }

    if (ct3d->hostpmem && !ct3d->lsa) {
        error_setg(errp, "lsa property must be set for persistent devices");
        return false;
    }

    if (ct3d->hostvmem) {
        MemoryRegion *vmr;
        char *v_name;

        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!vmr) {
            error_setg(errp, "volatile memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(vmr, false);
        memory_region_set_enabled(vmr, true);
        host_memory_backend_set_mapped(ct3d->hostvmem, true);
        if (ds->id) {
            v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
        } else {
            v_name = g_strdup("cxl-type3-dpa-vmem-space");
        }
        address_space_init(&ct3d->hostvmem_as, vmr, v_name);
        ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
        g_free(v_name);
    }

    if (ct3d->hostpmem) {
        MemoryRegion *pmr;
        char *p_name;

        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!pmr) {
            error_setg(errp, "persistent memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(pmr, true);
        memory_region_set_enabled(pmr, true);
        host_memory_backend_set_mapped(ct3d->hostpmem, true);
        if (ds->id) {
            p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
        } else {
            p_name = g_strdup("cxl-type3-dpa-pmem-space");
        }
        address_space_init(&ct3d->hostpmem_as, pmr, p_name);
        ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
        g_free(p_name);
    }

    return true;
}
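
/*
 * The device physical address (DPA) map resulting from the setup above:
 * the volatile backend (if any) occupies [0, vmem_size) and the persistent
 * backend follows it, each accessed through its own AddressSpace.
 */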

static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};

static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 6;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate,
                                   &ct3d->cci);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);
    if (*errp) {
        goto err_free_special_ops;
    }

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }
    cxl_event_init(&ct3d->cxl_dstate, 2);

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
err_free_special_ops:
    g_free(regs->special_ops);
err_address_space_free:
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
    return;
}

static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}

static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    unsigned int hdm_count;
    uint32_t cap;
    uint64_t dpa_base = 0;
    int i;

    cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY);
    hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap,
                                                 CXL_HDM_DECODER_CAPABILITY,
                                                 DECODER_COUNT));

    for (i = 0; i < hdm_count; i++) {
        uint64_t decoder_base, decoder_size, hpa_offset, skip;
        uint32_t hdm_ctrl, low, high;
        int ig, iw;

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc);
        decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc);
        decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO +
                       i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI +
                        i * hdm_inc);
        skip = ((uint64_t)high << 32) | (low & 0xf0000000);
        dpa_base += skip;

        hpa_offset = (uint64_t)host_addr - decoder_base;

        hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc);
        iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW);
        ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG);
        if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
            return false;
        }
        if (((uint64_t)host_addr < decoder_base) ||
            (hpa_offset >= decoder_size)) {
            int decoded_iw = cxl_interleave_ways_dec(iw, &error_fatal);

            if (decoded_iw == 0) {
                return false;
            }

            dpa_base += decoder_size / decoded_iw;
            continue;
        }

        *dpa = dpa_base +
            ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
             ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
              >> iw));

        return true;
    }
    return false;
}
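
/*
 * Worked example of the interleave math above (illustrative, assuming a
 * power-of-2 interleave): with ig = 0 (256 byte granules) and iw = 1
 * (2 ways), bits [7+ig:0] of the HPA offset pass through unchanged, the
 * iw way-select bits above them are dropped, and the remaining high bits
 * shift down by iw. An HPA offset of 0x300 therefore decodes to DPA
 * offset 0x100 within this device's contribution to the region.
 */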

static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
                                       hwaddr host_addr,
                                       unsigned int size,
                                       AddressSpace **as,
                                       uint64_t *dpa_offset)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return -ENODEV;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
        return -EINVAL;
    }

    if (*dpa_offset > ct3d->cxl_dstate.mem_size) {
        return -EINVAL;
    }

    if (vmr) {
        if (*dpa_offset < memory_region_size(vmr)) {
            *as = &ct3d->hostvmem_as;
        } else {
            *as = &ct3d->hostpmem_as;
            *dpa_offset -= memory_region_size(vmr);
        }
    } else {
        *as = &ct3d->hostpmem_as;
    }

    return 0;
}

MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    if (sanitize_running(&ct3d->cci)) {
        qemu_guest_getrandom_nofail(data, size);
        return MEMTX_OK;
    }

    return address_space_read(as, dpa_offset, attrs, data, size);
}

MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    if (sanitize_running(&ct3d->cci)) {
        return MEMTX_OK;
    }

    return address_space_write(as, dpa_offset, attrs, &data, size);
}
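
/*
 * While a sanitize command is in flight, reads return random data and
 * writes are silently dropped, so the guest can never observe pre-sanitize
 * contents through the HDM path.
 */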

static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_t3(ct3d);

    /*
     * Bring up an endpoint to target with MCTP over VDM.
     * This device is emulating an MLD with a single LD for now.
     */
    cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
                                          DEVICE(ct3d), DEVICE(ct3d),
                                          512); /* Max payload made up */
    cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d),
                             512); /* Max payload made up */
}

static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *), /* for backward compatibility */
    DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};
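
/*
 * Illustrative command line wiring for these properties (IDs, paths, sizes
 * and the root port name are made up for the example):
 *
 *   -object memory-backend-ram,id=cxl-vmem0,size=256M \
 *   -object memory-backend-file,id=cxl-pmem0,mem-path=/tmp/pmem0,size=256M \
 *   -object memory-backend-file,id=cxl-lsa0,mem-path=/tmp/lsa0,size=256M \
 *   -device cxl-type3,bus=root_port0,volatile-memdev=cxl-vmem0,\
 *           persistent-memdev=cxl-pmem0,lsa=cxl-lsa0,id=cxl-mem0
 */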

static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}

static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}

static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}

static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * Just like the PMEM, if the guest is not allowed to exit gracefully,
     * label updates will get lost.
     */
}

static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;
    AddressSpace *as;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return false;
    }

    if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
        return false;
    }

    if (vmr) {
        if (dpa_offset < memory_region_size(vmr)) {
            as = &ct3d->hostvmem_as;
        } else {
            as = &ct3d->hostpmem_as;
            dpa_offset -= memory_region_size(vmr);
        }
    } else {
        as = &ct3d->hostpmem_as;
    }

    address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
                        CXL_CACHE_LINE_SIZE);
    return true;
}

void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
{
    ct3d->poison_list_overflowed = true;
    ct3d->poison_list_overflow_ts =
        cxl_device_get_timestamp(&ct3d->cxl_dstate);
}

void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
                           Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    CXLPoison *p;

    if (length % 64) {
        error_setg(errp, "Poison injection must be in multiples of 64 bytes");
        return;
    }
    if (start % 64) {
        error_setg(errp, "Poison start address must be 64 byte aligned");
        return;
    }
    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    ct3d = CXL_TYPE3(obj);

    QLIST_FOREACH(p, &ct3d->poison_list, node) {
        if (((start >= p->start) && (start < p->start + p->length)) ||
            ((start + length > p->start) &&
             (start + length <= p->start + p->length))) {
            error_setg(errp,
                       "Overlap with existing poisoned region not supported");
            return;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        cxl_set_poison_list_overflowed(ct3d);
        return;
    }

    p = g_new0(CXLPoison, 1);
    p->length = length;
    p->start = start;
    /* Different from injected via the mbox */
    p->type = CXL_POISON_TYPE_INTERNAL;

    QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
    ct3d->poison_list_cnt++;
}
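
/*
 * Illustrative QMP invocation (the device id is made up):
 *
 * { "execute": "cxl-inject-poison",
 *   "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                  "start": 2048, "length": 256 } }
 */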

/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        if (header_count > 32) {
            error_setg(errp, "Header must be 32 DWORD or less");
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);

    return;
}

void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) &
          ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}

static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                                    const QemuUUID *uuid, uint32_t flags,
                                    uint8_t length, uint64_t timestamp)
{
    st24_le_p(&hdr->flags, flags);
    hdr->length = length;
    memcpy(&hdr->id, uuid, sizeof(hdr->id));
    stq_le_p(&hdr->timestamp, timestamp);
}

static const QemuUUID gen_media_uuid = {
    .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
};

static const QemuUUID dram_uuid = {
    .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
                 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
};

static const QemuUUID memory_module_uuid = {
    .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
                 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
};

#define CXL_GMER_VALID_CHANNEL      BIT(0)
#define CXL_GMER_VALID_RANK         BIT(1)
#define CXL_GMER_VALID_DEVICE       BIT(2)
#define CXL_GMER_VALID_COMPONENT    BIT(3)

static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
{
    switch (log) {
    case CXL_EVENT_LOG_INFORMATIONAL:
        return CXL_EVENT_TYPE_INFO;
    case CXL_EVENT_LOG_WARNING:
        return CXL_EVENT_TYPE_WARN;
    case CXL_EVENT_LOG_FAILURE:
        return CXL_EVENT_TYPE_FAIL;
    case CXL_EVENT_LOG_FATAL:
        return CXL_EVENT_TYPE_FATAL;
    /* DCD not yet supported */
    default:
        return -EINVAL;
    }
}

/* Component ID is device specific. Define this as a string. */
void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint64_t dpa,
                                        uint8_t descriptor, uint8_t type,
                                        uint8_t transaction_type,
                                        bool has_channel, uint8_t channel,
                                        bool has_rank, uint8_t rank,
                                        bool has_device, uint32_t device,
                                        const char *component_id,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventGenMedia gem;
    CXLEventRecordHdr *hdr = &gem.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&gem, 0, sizeof(gem));
    cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    stq_le_p(&gem.phys_addr, dpa);
    gem.descriptor = descriptor;
    gem.type = type;
    gem.transaction_type = transaction_type;

    if (has_channel) {
        gem.channel = channel;
        valid_flags |= CXL_GMER_VALID_CHANNEL;
    }

    if (has_rank) {
        gem.rank = rank;
        valid_flags |= CXL_GMER_VALID_RANK;
    }

    if (has_device) {
        st24_le_p(gem.device, device);
        valid_flags |= CXL_GMER_VALID_DEVICE;
    }

    if (component_id) {
        strncpy((char *)gem.component_id, component_id,
                sizeof(gem.component_id) - 1);
        valid_flags |= CXL_GMER_VALID_COMPONENT;
    }

    stw_le_p(&gem.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
        cxl_event_irq_assert(ct3d);
    }
}
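
/*
 * Illustrative QMP invocation (device id and field values are made up;
 * argument names follow the QAPI schema for this command):
 *
 * { "execute": "cxl-inject-general-media-event",
 *   "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                  "log": "informational", "flags": 1, "dpa": 4096,
 *                  "descriptor": 3, "type": 3, "transaction-type": 192 } }
 */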

#define CXL_DRAM_VALID_CHANNEL              BIT(0)
#define CXL_DRAM_VALID_RANK                 BIT(1)
#define CXL_DRAM_VALID_NIBBLE_MASK          BIT(2)
#define CXL_DRAM_VALID_BANK_GROUP           BIT(3)
#define CXL_DRAM_VALID_BANK                 BIT(4)
#define CXL_DRAM_VALID_ROW                  BIT(5)
#define CXL_DRAM_VALID_COLUMN               BIT(6)
#define CXL_DRAM_VALID_CORRECTION_MASK      BIT(7)

void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
                               uint64_t dpa, uint8_t descriptor,
                               uint8_t type, uint8_t transaction_type,
                               bool has_channel, uint8_t channel,
                               bool has_rank, uint8_t rank,
                               bool has_nibble_mask, uint32_t nibble_mask,
                               bool has_bank_group, uint8_t bank_group,
                               bool has_bank, uint8_t bank,
                               bool has_row, uint32_t row,
                               bool has_column, uint16_t column,
                               bool has_correction_mask,
                               uint64List *correction_mask,
                               Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventDram dram;
    CXLEventRecordHdr *hdr = &dram.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&dram, 0, sizeof(dram));
    cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    stq_le_p(&dram.phys_addr, dpa);
    dram.descriptor = descriptor;
    dram.type = type;
    dram.transaction_type = transaction_type;

    if (has_channel) {
        dram.channel = channel;
        valid_flags |= CXL_DRAM_VALID_CHANNEL;
    }

    if (has_rank) {
        dram.rank = rank;
        valid_flags |= CXL_DRAM_VALID_RANK;
    }

    if (has_nibble_mask) {
        st24_le_p(dram.nibble_mask, nibble_mask);
        valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
    }

    if (has_bank_group) {
        dram.bank_group = bank_group;
        valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
    }

    if (has_bank) {
        dram.bank = bank;
        valid_flags |= CXL_DRAM_VALID_BANK;
    }

    if (has_row) {
        st24_le_p(dram.row, row);
        valid_flags |= CXL_DRAM_VALID_ROW;
    }

    if (has_column) {
        stw_le_p(&dram.column, column);
        valid_flags |= CXL_DRAM_VALID_COLUMN;
    }

    if (has_correction_mask) {
        int count = 0;
        while (correction_mask && count < 4) {
            stq_le_p(&dram.correction_mask[count],
                     correction_mask->value);
            count++;
            correction_mask = correction_mask->next;
        }
        valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
    }

    stw_le_p(&dram.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
        cxl_event_irq_assert(ct3d);
    }
    return;
}

void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint8_t type,
                                        uint8_t health_status,
                                        uint8_t media_status,
                                        uint8_t additional_status,
                                        uint8_t life_used,
                                        int16_t temperature,
                                        uint32_t dirty_shutdown_count,
                                        uint32_t corrected_volatile_error_count,
                                        uint32_t corrected_persist_error_count,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventMemoryModule module;
    CXLEventRecordHdr *hdr = &module.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&module, 0, sizeof(module));
    cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    module.type = type;
    module.health_status = health_status;
    module.media_status = media_status;
    module.additional_status = additional_status;
    module.life_used = life_used;
    stw_le_p(&module.temperature, temperature);
    stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
    stl_le_p(&module.corrected_volatile_error_count,
             corrected_volatile_error_count);
    stl_le_p(&module.corrected_persistent_error_count,
             corrected_persist_error_count);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
        cxl_event_irq_assert(ct3d);
    }
}

static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL Memory Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
    cvc->set_cacheline = set_cacheline;
}

static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};

static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);