/* hw/mem/cxl_type3.c */
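/*
 * Emulation of a CXL Type 3 memory expander, modelled here as a
 * persistent-memory device. It exposes the CXL component and device
 * register blocks as PCI BARs, serves a CDAT table through a DOE mailbox,
 * and supports QMP-driven RAS error injection.
 */
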
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

#define DWORD_BYTE 4

/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};

static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                         int dsmad_handle, MemoryRegion *mr)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    if (!dsmas) {
        return -ENOMEM;
    }
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = CDAT_DSMAS_FLAG_NV,
        .DPA_base = 0,
        .DPA_length = int128_get64(mr->size),
    };

    /* For now, no memory side cache, plausible-ish numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    if (!dslbis0) {
        return -ENOMEM;
    }
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    if (!dslbis1) {
        return -ENOMEM;
    }
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    if (!dslbis2) {
        return -ENOMEM;
    }
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    if (!dslbis3) {
        return -ENOMEM;
    }
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    if (!dsemts) {
        return -ENOMEM;
    }
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /* Reserved - the non-volatile flag from the DSMAS is what matters */
        .EFI_memory_type_attr = 2,
        .DPA_offset = 0,
        .DPA_length = int128_get64(mr->size),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);

    return 0;
}

static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    MemoryRegion *nonvolatile_mr;
    CXLType3Dev *ct3d = priv;
    int dsmad_handle = 0;
    int rc;

    if (!ct3d->hostmem) {
        return 0;
    }

    nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!nonvolatile_mr) {
        return -EINVAL;
    }

    table = g_malloc0(CT3_CDAT_NUM_ENTRIES * sizeof(*table));
    if (!table) {
        return -ENOMEM;
    }

    rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, nonvolatile_mr);
    if (rc < 0) {
        return rc;
    }

    *cdat_table = g_steal_pointer(&table);

    return CT3_CDAT_NUM_ENTRIES;
}

static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}

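/*
 * Serve one CDAT entry per DOE Table Access request; the response header
 * carries the handle of the next entry, or CXL_DOE_TAB_ENT_MAX once the
 * last entry has been read.
 */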
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}

static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}

static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EU, OUI and CID
 */
#define UI64_NULL ~(0ULL)

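/*
 * Build the set of DVSECs (Designated Vendor-Specific Extended Capabilities)
 * advertised in config space: the CXL 2.0 device DVSEC, the register locator
 * pointing at the component and device register BARs, the GPF (Global
 * Persistent Flush) DVSEC, and the Flex Bus port DVSEC.
 */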
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = ct3d->hostmem->size >> 32,
        .range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                          (ct3d->hostmem->size & 0xF0000000),
        .range1_base_hi = 0,
        .range1_base_lo = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl = 0x02, /* IO always enabled */
        .status = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* reason for this value unclear */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}

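/*
 * Committing a decoder is modelled as instantaneous: clear COMMIT and ERR,
 * then report COMMITTED so software sees the decoder as active.
 */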
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;

    assert(which == 0);

    /* TODO: Sanity checks that the decoder is possible */
    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);

    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
}

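/*
 * Map QMP error-injection enum values onto the bit positions used in the
 * CXL RAS status registers. Returns -EINVAL for unknown types.
 */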
static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}

static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}

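/*
 * Most cache-mem registers are plain writes, but three offsets need side
 * effects: HDM decoder control (commit handling), the uncorrectable error
 * status register (RW1C semantics plus first-error-pointer and header-log
 * maintenance), and the correctable error status register (RW1C).
 */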
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        which_hdm = 0;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If single bit written that corresponds to the first error
         * pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using wrong flow for multiple header recording
                 * Following behavior in PCIe r6.0 and assuming multiple
                 * header support. Implementation defined choice to clear all
                 * matching records if more than one bit set - which corresponds
                 * closest to behavior of hardware not capable of multiple
                 * header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node, cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If no more errors, then follow recommendation of PCI spec
                 * r6.0 6.2.4.2 to set the first error pointer to a status
                 * bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    }
}

static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);
    MemoryRegion *mr;
    char *name;

    if (!ct3d->hostmem) {
        error_setg(errp, "memdev property must be set");
        return false;
    }

    mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!mr) {
        error_setg(errp, "memdev property must be set");
        return false;
    }
    memory_region_set_nonvolatile(mr, true);
    memory_region_set_enabled(mr, true);
    host_memory_backend_set_mapped(ct3d->hostmem, true);

    if (ds->id) {
        name = g_strdup_printf("cxl-type3-dpa-space:%s", ds->id);
    } else {
        name = g_strdup("cxl-type3-dpa-space");
    }
    address_space_init(&ct3d->hostmem_as, mr, name);
    g_free(name);

    ct3d->cxl_dstate.pmem_size = ct3d->hostmem->size;

    if (!ct3d->lsa) {
        error_setg(errp, "lsa property must be set");
        return false;
    }

    return true;
}

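/* DOE protocols handled by this device: only CDAT table access for now */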
static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};

static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 1;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
err_address_space_free:
    address_space_destroy(&ct3d->hostmem_as);
    return;
}

static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    address_space_destroy(&ct3d->hostmem_as);
}

/* TODO: Support multiple HDM decoders and DPA skip */
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint64_t decoder_base, decoder_size, hpa_offset;
    uint32_t hdm0_ctrl;
    int ig, iw;

    decoder_base = (((uint64_t)cache_mem[R_CXL_HDM_DECODER0_BASE_HI] << 32) |
                    cache_mem[R_CXL_HDM_DECODER0_BASE_LO]);
    if ((uint64_t)host_addr < decoder_base) {
        return false;
    }

    hpa_offset = (uint64_t)host_addr - decoder_base;

    decoder_size = ((uint64_t)cache_mem[R_CXL_HDM_DECODER0_SIZE_HI] << 32) |
                   cache_mem[R_CXL_HDM_DECODER0_SIZE_LO];
    if (hpa_offset >= decoder_size) {
        return false;
    }

    hdm0_ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
    iw = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IW);
    ig = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IG);
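
    /*
     * Reconstruct the DPA from the HPA offset: bits below the interleave
     * granularity (2^(8 + ig) bytes) pass through unchanged, while the iw
     * interleave-way selector bits above them are stripped by shifting the
     * upper address bits down.
     */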
    *dpa = (MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
           ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset) >> iw);

    return true;
}

MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset;
    MemoryRegion *mr;

    /* TODO support volatile region */
    mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!mr) {
        return MEMTX_ERROR;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
        return MEMTX_ERROR;
    }

    if (dpa_offset > int128_get64(mr->size)) {
        return MEMTX_ERROR;
    }

    return address_space_read(&ct3d->hostmem_as, dpa_offset, attrs, data, size);
}

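/*
 * Unlike reads, writes that miss the decoder or fall outside the backing
 * region are silently dropped (MEMTX_OK) rather than reported as a bus
 * error.
 */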
MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset;
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!mr) {
        return MEMTX_OK;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
        return MEMTX_OK;
    }

    if (dpa_offset > int128_get64(mr->size)) {
        return MEMTX_OK;
    }

    return address_space_write(&ct3d->hostmem_as, dpa_offset, attrs,
                               &data, size);
}

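/* Reset restores both register blocks to their spec-defined defaults */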
static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_common(&ct3d->cxl_dstate);
}

static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};

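/*
 * Illustrative command line for instantiating the device (IDs, paths and
 * the bus name are placeholders; a CXL host bridge and root port must
 * already exist in the machine topology):
 *
 *  -object memory-backend-file,id=cxl-mem0,mem-path=/tmp/mem0,size=256M
 *  -object memory-backend-file,id=cxl-lsa0,mem-path=/tmp/lsa0,size=256M
 *  -device cxl-type3,bus=root_port0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0
 */
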
static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}

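/*
 * LSA (Label Storage Area) accessors, exposed through the class vtable so
 * other components (e.g. the device mailbox command handlers) can read and
 * write label data held in the "lsa" memory backend.
 */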
static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset); /* guard against offset + size wrapping */
}

static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}

static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);
}

/*
 * Just like the PMEM, if the guest is not allowed to exit gracefully, label
 * updates will get lost.
 */

/* For uncorrectable errors include support for multiple header recording */
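/*
 * Example QMP invocation (the device id "cxl-pmem0" and header values are
 * placeholders):
 *
 * { "execute": "cxl-inject-uncorrectable-errors",
 *   "arguments": {
 *     "path": "/machine/peripheral/cxl-pmem0",
 *     "errors": [ { "type": "cache-data-parity", "header": [ 0, 1, 2 ] } ]
 *   }
 * }
 */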
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));
        if (!cxl_err) {
            return;
        }

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        /* Entries remain once 32 DWORD are stored: the header is too long */
        if (header) {
            error_setg(errp, "Header must be 32 DWORD or less");
            g_free(cxl_err);
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}

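/*
 * Example QMP invocation (the device id is again a placeholder):
 *
 * { "execute": "cxl-inject-correctable-error",
 *   "arguments": { "path": "/machine/peripheral/cxl-pmem0",
 *                  "type": "mem-data-ecc" } }
 */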
void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) & ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}

static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL PMEM Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
}

static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);