hw/cxl: Support 4 HDM decoders at all levels of topology
hw/mem/cxl_type3.c
/*
 * CXL Type 3 (memory expander) device
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-v2-only
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

#define DWORD_BYTE 4
/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};
static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                         int dsmad_handle, MemoryRegion *mr,
                                         bool is_pmem, uint64_t dpa_base)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    if (!dsmas) {
        return -ENOMEM;
    }
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
        .DPA_base = dpa_base,
        .DPA_length = memory_region_size(mr),
    };

    /* For now, no memory side cache, plausiblish numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    if (!dslbis0) {
        return -ENOMEM;
    }
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    if (!dslbis1) {
        return -ENOMEM;
    }
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    if (!dslbis2) {
        return -ENOMEM;
    }
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    if (!dslbis3) {
        return -ENOMEM;
    }
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    if (!dsemts) {
        return -ENOMEM;
    }
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /*
         * NV: Reserved - the non volatile from DSMAS matters
         * V: EFI_MEMORY_SP
         */
        .EFI_memory_type_attr = is_pmem ? 2 : 1,
        .DPA_offset = 0,
        .DPA_length = memory_region_size(mr),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);

    return 0;
}
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    CXLType3Dev *ct3d = priv;
    MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
    int dsmad_handle = 0;
    int cur_ent = 0;
    int len = 0;
    int rc, i;

    if (!ct3d->hostpmem && !ct3d->hostvmem) {
        return 0;
    }

    if (ct3d->hostvmem) {
        volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!volatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    if (ct3d->hostpmem) {
        nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!nonvolatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    table = g_malloc0(len * sizeof(*table));
    if (!table) {
        return -ENOMEM;
    }

    /* Now fill them in */
    if (volatile_mr) {
        rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
                                           false, 0);
        if (rc < 0) {
            return rc;
        }
        cur_ent = CT3_CDAT_NUM_ENTRIES;
    }

    if (nonvolatile_mr) {
        rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
                                           nonvolatile_mr, true,
                                           (volatile_mr ?
                                            memory_region_size(volatile_mr) : 0));
        if (rc < 0) {
            goto error_cleanup;
        }
        cur_ent += CT3_CDAT_NUM_ENTRIES;
    }
    assert(len == cur_ent);

    *cdat_table = g_steal_pointer(&table);

    return len;
error_cleanup:
    for (i = 0; i < cur_ent; i++) {
        g_free(table[i]);
    }
    return rc;
}
static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };
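
    /* Response header first, then the requested CDAT entry */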
    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}
static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}
static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EU, OUI and CID
 */
#define UI64_NULL ~(0ULL)
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;
    uint32_t range1_size_hi, range1_size_lo,
             range1_base_hi = 0, range1_base_lo = 0,
             range2_size_hi = 0, range2_size_lo = 0,
             range2_base_hi = 0, range2_base_lo = 0;

    /*
     * Volatile memory is mapped as (0x0)
     * Persistent memory is mapped at (volatile->size)
     */
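    /*
     * In the low size registers below, 0x3 sets the Memory_Info_Valid and
     * Memory_Active bits, the shifted constants fill the media type/class
     * fields, and bits [31:28] carry the low bits of the size. This is an
     * informal reading of the CXL 2.0 Device DVSEC layout, offered as a
     * guide to the constants rather than taken from this file.
     */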
    if (ct3d->hostvmem) {
        range1_size_hi = ct3d->hostvmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostvmem->size & 0xF0000000);
        if (ct3d->hostpmem) {
            range2_size_hi = ct3d->hostpmem->size >> 32;
            range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                             (ct3d->hostpmem->size & 0xF0000000);
        }
    } else {
        range1_size_hi = ct3d->hostpmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostpmem->size & 0xF0000000);
    }

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = range1_size_hi,
        .range1_size_lo = range1_size_lo,
        .range1_base_hi = range1_base_hi,
        .range1_base_lo = range1_base_lo,
        .range2_size_hi = range2_size_hi,
        .range2_size_lo = range2_size_lo,
        .range2_base_hi = range2_base_hi,
        .range2_base_lo = range2_base_lo,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl = 0x02, /* IO always enabled */
        .status = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}
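
/*
 * The HDM decoder register blocks are evenly spaced, so the distance between
 * the DECODER1 and DECODER0 base registers gives the stride used to index any
 * of the four decoders handled below.
 */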
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
    /* TODO: Sanity checks that the decoder is possible */
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}

static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);

    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}
static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}
static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);
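
    /*
     * All four HDM decoder CTRL registers share one layout, so the DECODER0
     * field definitions are reused to extract the COMMIT bit for whichever
     * decoder is being written.
     */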
    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 0;
        break;
    case A_CXL_HDM_DECODER1_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 1;
        break;
    case A_CXL_HDM_DECODER2_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 2;
        break;
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 3;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If single bit written that corresponds to the first error
         * pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using wrong flow for multiple header recording
                 * Following behavior in PCIe r6.0 and assuming multiple
                 * header support. Implementation defined choice to clear all
                 * matching records if more than one bit set - which corresponds
                 * closest to behavior of hardware not capable of multiple
                 * header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If no more errors, then follow recommendation of PCI spec
                 * r6.0 6.2.4.2 to set the first error pointer to a status
                 * bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    } else if (should_uncommit) {
        hdm_decoder_uncommit(ct3d, which_hdm);
    }
}
static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);

    if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
        error_setg(errp, "at least one memdev property must be set");
        return false;
    } else if (ct3d->hostmem && ct3d->hostpmem) {
        error_setg(errp, "[memdev] cannot be used with new "
                         "[persistent-memdev] property");
        return false;
    } else if (ct3d->hostmem) {
        /* Use of hostmem property implies pmem */
        ct3d->hostpmem = ct3d->hostmem;
        ct3d->hostmem = NULL;
    }

    if (ct3d->hostpmem && !ct3d->lsa) {
        error_setg(errp, "lsa property must be set for persistent devices");
        return false;
    }

    if (ct3d->hostvmem) {
        MemoryRegion *vmr;
        char *v_name;

        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!vmr) {
            error_setg(errp, "volatile memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(vmr, false);
        memory_region_set_enabled(vmr, true);
        host_memory_backend_set_mapped(ct3d->hostvmem, true);
        if (ds->id) {
            v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
        } else {
            v_name = g_strdup("cxl-type3-dpa-vmem-space");
        }
        address_space_init(&ct3d->hostvmem_as, vmr, v_name);
        ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
        g_free(v_name);
    }

    if (ct3d->hostpmem) {
        MemoryRegion *pmr;
        char *p_name;

        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!pmr) {
            error_setg(errp, "persistent memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(pmr, true);
        memory_region_set_enabled(pmr, true);
        host_memory_backend_set_mapped(ct3d->hostpmem, true);
        if (ds->id) {
            p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
        } else {
            p_name = g_strdup("cxl-type3-dpa-pmem-space");
        }
        address_space_init(&ct3d->hostpmem_as, pmr, p_name);
        ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
        g_free(p_name);
    }

    return true;
}
static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 6;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);
    if (*errp) {
        goto err_free_special_ops;
    }

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }
    cxl_event_init(&ct3d->cxl_dstate, 2);

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
err_free_special_ops:
    g_free(regs->special_ops);
err_address_space_free:
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
    return;
}
static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    unsigned int hdm_count;
    uint32_t cap;
    uint64_t dpa_base = 0;
    int i;

    cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY);
    hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap,
                                                 CXL_HDM_DECODER_CAPABILITY,
                                                 DECODER_COUNT));
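
    /* Walk the implemented decoders until one claims this host address */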
    for (i = 0; i < hdm_count; i++) {
        uint64_t decoder_base, decoder_size, hpa_offset, skip;
        uint32_t hdm_ctrl, low, high;
        int ig, iw;

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc);
        decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc);
        decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO +
                       i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI +
                        i * hdm_inc);
        skip = ((uint64_t)high << 32) | (low & 0xf0000000);
        dpa_base += skip;

        hpa_offset = (uint64_t)host_addr - decoder_base;

        hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc);
        iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW);
        ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG);
        if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
            return false;
        }
        if (((uint64_t)host_addr < decoder_base) ||
            (hpa_offset >= decoder_size)) {
            dpa_base += decoder_size /
                cxl_interleave_ways_dec(iw, &error_fatal);
            continue;
        }
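
        /*
         * Strip out the interleave-way selector bits: HPA offset bits below
         * (8 + ig) address bytes within an interleave granule and pass
         * through unchanged, while the bits above the iw selector bits are
         * shifted down by iw to form a contiguous device physical address.
         */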
        *dpa = dpa_base +
            ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
             ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
              >> iw));

        return true;
    }
    return false;
}
static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
                                       hwaddr host_addr,
                                       unsigned int size,
                                       AddressSpace **as,
                                       uint64_t *dpa_offset)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return -ENODEV;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
        return -EINVAL;
    }

    if (*dpa_offset > ct3d->cxl_dstate.mem_size) {
        return -EINVAL;
    }

    if (vmr) {
        if (*dpa_offset < memory_region_size(vmr)) {
            *as = &ct3d->hostvmem_as;
        } else {
            *as = &ct3d->hostpmem_as;
            *dpa_offset -= memory_region_size(vmr);
        }
    } else {
        *as = &ct3d->hostpmem_as;
    }

    return 0;
}
MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    return address_space_read(as, dpa_offset, attrs, data, size);
}

MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    return address_space_write(as, dpa_offset, attrs, &data, size);
}
static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_common(&ct3d->cxl_dstate);
}
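
/*
 * Illustrative wiring of the properties below (backend ids, sizes and the
 * root port name are made-up examples, not requirements):
 *
 *  -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/t3.raw,size=256M
 *  -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa.raw,size=256M
 *  -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0
 */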
static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *), /* for backward compatibility */
    DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};
static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}

static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}

static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}

static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * Just like the PMEM, if the guest is not allowed to exit gracefully, label
     * updates will get lost.
     */
}
static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;
    AddressSpace *as;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return false;
    }

    if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
        return false;
    }

    if (vmr) {
        if (dpa_offset < memory_region_size(vmr)) {
            as = &ct3d->hostvmem_as;
        } else {
            as = &ct3d->hostpmem_as;
            dpa_offset -= memory_region_size(vmr);
        }
    } else {
        as = &ct3d->hostpmem_as;
    }
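
    /* Write one full cache line from the caller's buffer into the backing store */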
    address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
                        CXL_CACHE_LINE_SIZE);
    return true;
}
void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
{
    ct3d->poison_list_overflowed = true;
    ct3d->poison_list_overflow_ts =
        cxl_device_get_timestamp(&ct3d->cxl_dstate);
}
void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
                           Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    CXLPoison *p;

    if (length % 64) {
        error_setg(errp, "Poison injection must be in multiples of 64 bytes");
        return;
    }
    if (start % 64) {
        error_setg(errp, "Poison start address must be 64 byte aligned");
        return;
    }
    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    ct3d = CXL_TYPE3(obj);
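
    /* Reject ranges that overlap an existing poison record */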
    QLIST_FOREACH(p, &ct3d->poison_list, node) {
        if (((start >= p->start) && (start < p->start + p->length)) ||
            ((start + length > p->start) &&
             (start + length <= p->start + p->length))) {
            error_setg(errp,
                       "Overlap with existing poisoned region not supported");
            return;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        cxl_set_poison_list_overflowed(ct3d);
        return;
    }

    p = g_new0(CXLPoison, 1);
    p->length = length;
    p->start = start;
    p->type = CXL_POISON_TYPE_INTERNAL; /* Different from injected via the mbox */

    QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
    ct3d->poison_list_cnt++;
}
/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));
        if (!cxl_err) {
            return;
        }

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        if (header) {
            /* Entries remain beyond the 32 DWORD header log limit */
            error_setg(errp, "Header must be 32 DWORD or less");
            g_free(cxl_err);
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }
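
    /*
     * Only the first recorded error populates the header log and
     * FIRST_ERROR_POINTER; subsequent entries stay queued until software
     * clears the current first error via the status register.
     */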
    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);

    return;
}
void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) & ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                                    const QemuUUID *uuid, uint32_t flags,
                                    uint8_t length, uint64_t timestamp)
{
    st24_le_p(&hdr->flags, flags);
    hdr->length = length;
    memcpy(&hdr->id, uuid, sizeof(hdr->id));
    stq_le_p(&hdr->timestamp, timestamp);
}

static const QemuUUID gen_media_uuid = {
    .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
};

static const QemuUUID dram_uuid = {
    .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
                 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
};

static const QemuUUID memory_module_uuid = {
    .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
                 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
};

#define CXL_GMER_VALID_CHANNEL      BIT(0)
#define CXL_GMER_VALID_RANK         BIT(1)
#define CXL_GMER_VALID_DEVICE       BIT(2)
#define CXL_GMER_VALID_COMPONENT    BIT(3)
static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
{
    switch (log) {
    case CXL_EVENT_LOG_INFORMATIONAL:
        return CXL_EVENT_TYPE_INFO;
    case CXL_EVENT_LOG_WARNING:
        return CXL_EVENT_TYPE_WARN;
    case CXL_EVENT_LOG_FAILURE:
        return CXL_EVENT_TYPE_FAIL;
    case CXL_EVENT_LOG_FATAL:
        return CXL_EVENT_TYPE_FATAL;
    /* DCD not yet supported */
    default:
        return -EINVAL;
    }
}
/* Component ID is device specific. Define this as a string. */
void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint64_t dpa,
                                        uint8_t descriptor, uint8_t type,
                                        uint8_t transaction_type,
                                        bool has_channel, uint8_t channel,
                                        bool has_rank, uint8_t rank,
                                        bool has_device, uint32_t device,
                                        const char *component_id,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventGenMedia gem;
    CXLEventRecordHdr *hdr = &gem.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&gem, 0, sizeof(gem));
    cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    stq_le_p(&gem.phys_addr, dpa);
    gem.descriptor = descriptor;
    gem.type = type;
    gem.transaction_type = transaction_type;

    if (has_channel) {
        gem.channel = channel;
        valid_flags |= CXL_GMER_VALID_CHANNEL;
    }

    if (has_rank) {
        gem.rank = rank;
        valid_flags |= CXL_GMER_VALID_RANK;
    }

    if (has_device) {
        st24_le_p(gem.device, device);
        valid_flags |= CXL_GMER_VALID_DEVICE;
    }

    if (component_id) {
        strncpy((char *)gem.component_id, component_id,
                sizeof(gem.component_id) - 1);
        valid_flags |= CXL_GMER_VALID_COMPONENT;
    }

    stw_le_p(&gem.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
        cxl_event_irq_assert(ct3d);
    }
}
#define CXL_DRAM_VALID_CHANNEL              BIT(0)
#define CXL_DRAM_VALID_RANK                 BIT(1)
#define CXL_DRAM_VALID_NIBBLE_MASK          BIT(2)
#define CXL_DRAM_VALID_BANK_GROUP           BIT(3)
#define CXL_DRAM_VALID_BANK                 BIT(4)
#define CXL_DRAM_VALID_ROW                  BIT(5)
#define CXL_DRAM_VALID_COLUMN               BIT(6)
#define CXL_DRAM_VALID_CORRECTION_MASK      BIT(7)

void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
                               uint64_t dpa, uint8_t descriptor,
                               uint8_t type, uint8_t transaction_type,
                               bool has_channel, uint8_t channel,
                               bool has_rank, uint8_t rank,
                               bool has_nibble_mask, uint32_t nibble_mask,
                               bool has_bank_group, uint8_t bank_group,
                               bool has_bank, uint8_t bank,
                               bool has_row, uint32_t row,
                               bool has_column, uint16_t column,
                               bool has_correction_mask,
                               uint64List *correction_mask,
                               Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventDram dram;
    CXLEventRecordHdr *hdr = &dram.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&dram, 0, sizeof(dram));
    cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    stq_le_p(&dram.phys_addr, dpa);
    dram.descriptor = descriptor;
    dram.type = type;
    dram.transaction_type = transaction_type;

    if (has_channel) {
        dram.channel = channel;
        valid_flags |= CXL_DRAM_VALID_CHANNEL;
    }

    if (has_rank) {
        dram.rank = rank;
        valid_flags |= CXL_DRAM_VALID_RANK;
    }

    if (has_nibble_mask) {
        st24_le_p(dram.nibble_mask, nibble_mask);
        valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
    }

    if (has_bank_group) {
        dram.bank_group = bank_group;
        valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
    }

    if (has_bank) {
        dram.bank = bank;
        valid_flags |= CXL_DRAM_VALID_BANK;
    }

    if (has_row) {
        st24_le_p(dram.row, row);
        valid_flags |= CXL_DRAM_VALID_ROW;
    }

    if (has_column) {
        stw_le_p(&dram.column, column);
        valid_flags |= CXL_DRAM_VALID_COLUMN;
    }

    if (has_correction_mask) {
        int count = 0;
        while (correction_mask && count < 4) {
            stq_le_p(&dram.correction_mask[count],
                     correction_mask->value);
            count++;
            correction_mask = correction_mask->next;
        }
        valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
    }

    stw_le_p(&dram.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
        cxl_event_irq_assert(ct3d);
    }
    return;
}
void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint8_t type,
                                        uint8_t health_status,
                                        uint8_t media_status,
                                        uint8_t additional_status,
                                        uint8_t life_used,
                                        int16_t temperature,
                                        uint32_t dirty_shutdown_count,
                                        uint32_t corrected_volatile_error_count,
                                        uint32_t corrected_persistent_error_count,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventMemoryModule module;
    CXLEventRecordHdr *hdr = &module.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&module, 0, sizeof(module));
    cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    module.type = type;
    module.health_status = health_status;
    module.media_status = media_status;
    module.additional_status = additional_status;
    module.life_used = life_used;
    stw_le_p(&module.temperature, temperature);
    stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
    stl_le_p(&module.corrected_volatile_error_count,
             corrected_volatile_error_count);
    stl_le_p(&module.corrected_persistent_error_count,
             corrected_persistent_error_count);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
        cxl_event_irq_assert(ct3d);
    }
}
static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL Memory Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
    cvc->set_cacheline = set_cacheline;
}

static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};

static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);