/*
 * CXL Type 3 (memory expander) device emulation
 *
 * hw/mem/cxl_type3.c
 */
1 #include "qemu/osdep.h"
2 #include "qemu/units.h"
3 #include "qemu/error-report.h"
4 #include "hw/mem/memory-device.h"
5 #include "hw/mem/pc-dimm.h"
6 #include "hw/pci/pci.h"
7 #include "hw/qdev-properties.h"
8 #include "qapi/error.h"
9 #include "qemu/log.h"
10 #include "qemu/module.h"
11 #include "qemu/pmem.h"
12 #include "qemu/range.h"
13 #include "qemu/rcu.h"
14 #include "sysemu/hostmem.h"
15 #include "sysemu/numa.h"
16 #include "hw/cxl/cxl.h"
17 #include "hw/pci/msix.h"
19 #define DWORD_BYTE 4
/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,     /* Device Scoped Memory Affinity Structure */
    CT3_CDAT_DSLBIS0,   /* Latency/Bandwidth: read latency */
    CT3_CDAT_DSLBIS1,   /* Latency/Bandwidth: write latency */
    CT3_CDAT_DSLBIS2,   /* Latency/Bandwidth: read bandwidth */
    CT3_CDAT_DSLBIS3,   /* Latency/Bandwidth: write bandwidth */
    CT3_CDAT_DSEMTS,    /* Device Scoped EFI Memory Type Structure */
    CT3_CDAT_NUM_ENTRIES
};
32 static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
33 int dsmad_handle, MemoryRegion *mr)
35 g_autofree CDATDsmas *dsmas = NULL;
36 g_autofree CDATDslbis *dslbis0 = NULL;
37 g_autofree CDATDslbis *dslbis1 = NULL;
38 g_autofree CDATDslbis *dslbis2 = NULL;
39 g_autofree CDATDslbis *dslbis3 = NULL;
40 g_autofree CDATDsemts *dsemts = NULL;
42 dsmas = g_malloc(sizeof(*dsmas));
43 if (!dsmas) {
44 return -ENOMEM;
46 *dsmas = (CDATDsmas) {
47 .header = {
48 .type = CDAT_TYPE_DSMAS,
49 .length = sizeof(*dsmas),
51 .DSMADhandle = dsmad_handle,
52 .flags = CDAT_DSMAS_FLAG_NV,
53 .DPA_base = 0,
54 .DPA_length = int128_get64(mr->size),
57 /* For now, no memory side cache, plausiblish numbers */
58 dslbis0 = g_malloc(sizeof(*dslbis0));
59 if (!dslbis0) {
60 return -ENOMEM;
62 *dslbis0 = (CDATDslbis) {
63 .header = {
64 .type = CDAT_TYPE_DSLBIS,
65 .length = sizeof(*dslbis0),
67 .handle = dsmad_handle,
68 .flags = HMAT_LB_MEM_MEMORY,
69 .data_type = HMAT_LB_DATA_READ_LATENCY,
70 .entry_base_unit = 10000, /* 10ns base */
71 .entry[0] = 15, /* 150ns */
74 dslbis1 = g_malloc(sizeof(*dslbis1));
75 if (!dslbis1) {
76 return -ENOMEM;
78 *dslbis1 = (CDATDslbis) {
79 .header = {
80 .type = CDAT_TYPE_DSLBIS,
81 .length = sizeof(*dslbis1),
83 .handle = dsmad_handle,
84 .flags = HMAT_LB_MEM_MEMORY,
85 .data_type = HMAT_LB_DATA_WRITE_LATENCY,
86 .entry_base_unit = 10000,
87 .entry[0] = 25, /* 250ns */
90 dslbis2 = g_malloc(sizeof(*dslbis2));
91 if (!dslbis2) {
92 return -ENOMEM;
94 *dslbis2 = (CDATDslbis) {
95 .header = {
96 .type = CDAT_TYPE_DSLBIS,
97 .length = sizeof(*dslbis2),
99 .handle = dsmad_handle,
100 .flags = HMAT_LB_MEM_MEMORY,
101 .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
102 .entry_base_unit = 1000, /* GB/s */
103 .entry[0] = 16,
106 dslbis3 = g_malloc(sizeof(*dslbis3));
107 if (!dslbis3) {
108 return -ENOMEM;
110 *dslbis3 = (CDATDslbis) {
111 .header = {
112 .type = CDAT_TYPE_DSLBIS,
113 .length = sizeof(*dslbis3),
115 .handle = dsmad_handle,
116 .flags = HMAT_LB_MEM_MEMORY,
117 .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
118 .entry_base_unit = 1000, /* GB/s */
119 .entry[0] = 16,
122 dsemts = g_malloc(sizeof(*dsemts));
123 if (!dsemts) {
124 return -ENOMEM;
126 *dsemts = (CDATDsemts) {
127 .header = {
128 .type = CDAT_TYPE_DSEMTS,
129 .length = sizeof(*dsemts),
131 .DSMAS_handle = dsmad_handle,
132 /* Reserved - the non volatile from DSMAS matters */
133 .EFI_memory_type_attr = 2,
134 .DPA_offset = 0,
135 .DPA_length = int128_get64(mr->size),
138 /* Header always at start of structure */
139 cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
140 cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
141 cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
142 cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
143 cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
144 cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);
146 return 0;
149 static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
151 g_autofree CDATSubHeader **table = NULL;
152 MemoryRegion *nonvolatile_mr;
153 CXLType3Dev *ct3d = priv;
154 int dsmad_handle = 0;
155 int rc;
157 if (!ct3d->hostmem) {
158 return 0;
161 nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostmem);
162 if (!nonvolatile_mr) {
163 return -EINVAL;
166 table = g_malloc0(CT3_CDAT_NUM_ENTRIES * sizeof(*table));
167 if (!table) {
168 return -ENOMEM;
171 rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, nonvolatile_mr);
172 if (rc < 0) {
173 return rc;
176 *cdat_table = g_steal_pointer(&table);
178 return CT3_CDAT_NUM_ENTRIES;
181 static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
183 int i;
185 for (i = 0; i < num; i++) {
186 g_free(cdat_table[i]);
188 g_free(cdat_table);
191 static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
193 CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
194 uint16_t ent;
195 void *base;
196 uint32_t len;
197 CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
198 CDATRsp rsp;
200 assert(cdat->entry_len);
202 /* Discard if request length mismatched */
203 if (pcie_doe_get_obj_len(req) <
204 DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
205 return false;
208 ent = req->entry_handle;
209 base = cdat->entry[ent].base;
210 len = cdat->entry[ent].length;
212 rsp = (CDATRsp) {
213 .header = {
214 .vendor_id = CXL_VENDOR_ID,
215 .data_obj_type = CXL_DOE_TABLE_ACCESS,
216 .reserved = 0x0,
217 .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
219 .rsp_code = CXL_DOE_TAB_RSP,
220 .table_type = CXL_DOE_TAB_TYPE_CDAT,
221 .entry_handle = (ent < cdat->entry_len - 1) ?
222 ent + 1 : CXL_DOE_TAB_ENT_MAX,
225 memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
226 memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
227 base, len);
229 doe_cap->read_mbox_len += rsp.header.length;
231 return true;
234 static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
236 CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
237 uint32_t val;
239 if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
240 return val;
243 return pci_default_read_config(pci_dev, addr, size);
246 static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
247 int size)
249 CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
251 pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
252 pci_default_write_config(pci_dev, addr, val, size);
256 * Null value of all Fs suggested by IEEE RA guidelines for use of
257 * EU, OUI and CID
259 #define UI64_NULL ~(0ULL)
/*
 * Create the four DVSEC capabilities a CXL 2.0 Type 3 device advertises:
 * device DVSEC, register locator, GPF and flex bus port.
 */
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = ct3d->hostmem->size >> 32,
        /*
         * Low size dword also carries flag bits; the 0xF0000000 mask keeps
         * only the size bits of the low word — assumes hostmem->size is
         * 256MiB aligned (TODO confirm enforced elsewhere).
         */
        .range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                          (ct3d->hostmem->size & 0xF0000000),
        .range1_base_hi = 0,
        .range1_base_lo = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    /* Register locator: points firmware/OS at the two register BARs */
    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);

    /* Global Persistent Flush timing/power budget */
    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 miliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl = 0x02, /* IO always enabled */
        .status = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}
311 static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
313 ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
314 uint32_t *cache_mem = cregs->cache_mem_registers;
316 assert(which == 0);
318 /* TODO: Sanity checks that the decoder is possible */
319 ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
320 ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
322 ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
/*
 * MMIO write handler for the cache-mem component register region.
 * The value is always stored; a write that sets COMMIT on the decoder 0
 * control register additionally triggers the decoder commit sequence.
 */
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    int which_hdm = -1;

    /* Region is declared 4-byte access only */
    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        which_hdm = 0;
        break;
    default:
        break;
    }

    /* Store first so hdm_decoder_commit() sees the updated register */
    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    }
}
353 static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
355 DeviceState *ds = DEVICE(ct3d);
356 MemoryRegion *mr;
357 char *name;
359 if (!ct3d->hostmem) {
360 error_setg(errp, "memdev property must be set");
361 return false;
364 mr = host_memory_backend_get_memory(ct3d->hostmem);
365 if (!mr) {
366 error_setg(errp, "memdev property must be set");
367 return false;
369 memory_region_set_nonvolatile(mr, true);
370 memory_region_set_enabled(mr, true);
371 host_memory_backend_set_mapped(ct3d->hostmem, true);
373 if (ds->id) {
374 name = g_strdup_printf("cxl-type3-dpa-space:%s", ds->id);
375 } else {
376 name = g_strdup("cxl-type3-dpa-space");
378 address_space_init(&ct3d->hostmem_as, mr, name);
379 g_free(name);
381 ct3d->cxl_dstate.pmem_size = ct3d->hostmem->size;
383 if (!ct3d->lsa) {
384 error_setg(errp, "lsa property must be set");
385 return false;
388 return true;
391 static DOEProtocol doe_cdat_prot[] = {
392 { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
/*
 * PCI realize: validate/attach backing memory, build capabilities and
 * DVSECs, register the component and device register BARs, and bring up
 * MSI-X and the CDAT DOE mailbox.
 */
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 1;
    int i;

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);
    pci_config_set_class(pci_conf, PCI_CLASS_MEMORY_CXL);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        /* Serial number capability occupies 0x0c bytes at 0x100 */
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    /* build_dvsecs() reads cxl_cstate, so set pdev first */
    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    /* Only writes need special handling; reads use the default path */
    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    /*
     * NOTE(review): any failure reported by cxl_doe_cdat_init() via errp is
     * not checked here, so realize can "succeed" with a failed CDAT setup —
     * confirm whether a cleanup/early-return path is needed.
     */
    cxl_doe_cdat_init(cxl_cstate, errp);
}
455 static void ct3_exit(PCIDevice *pci_dev)
457 CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
458 CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
459 ComponentRegisters *regs = &cxl_cstate->crb;
461 cxl_doe_cdat_release(cxl_cstate);
462 g_free(regs->special_ops);
463 address_space_destroy(&ct3d->hostmem_as);
/* TODO: Support multiple HDM decoders and DPA skip */
/*
 * Translate a host physical address into a device physical address using
 * HDM decoder 0.  Returns false when @host_addr falls outside the decoder's
 * programmed range.
 */
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint64_t decoder_base, decoder_size, hpa_offset;
    uint32_t hdm0_ctrl;
    int ig, iw;

    decoder_base = (((uint64_t)cache_mem[R_CXL_HDM_DECODER0_BASE_HI] << 32) |
                    cache_mem[R_CXL_HDM_DECODER0_BASE_LO]);
    if ((uint64_t)host_addr < decoder_base) {
        return false;
    }

    hpa_offset = (uint64_t)host_addr - decoder_base;

    decoder_size = ((uint64_t)cache_mem[R_CXL_HDM_DECODER0_SIZE_HI] << 32) |
        cache_mem[R_CXL_HDM_DECODER0_SIZE_LO];
    if (hpa_offset >= decoder_size) {
        return false;
    }

    /* IW = interleave ways (log2), IG = interleave granularity encoding */
    hdm0_ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
    iw = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IW);
    ig = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IG);

    /*
     * Keep the low (8 + ig) offset bits and shift the bits above the
     * interleave-way selector down by iw, removing the way-select bits
     * from the HPA offset to form this device's DPA.
     */
    *dpa = (MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
        ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset) >> iw);

    return true;
}
498 MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
499 unsigned size, MemTxAttrs attrs)
501 CXLType3Dev *ct3d = CXL_TYPE3(d);
502 uint64_t dpa_offset;
503 MemoryRegion *mr;
505 /* TODO support volatile region */
506 mr = host_memory_backend_get_memory(ct3d->hostmem);
507 if (!mr) {
508 return MEMTX_ERROR;
511 if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
512 return MEMTX_ERROR;
515 if (dpa_offset > int128_get64(mr->size)) {
516 return MEMTX_ERROR;
519 return address_space_read(&ct3d->hostmem_as, dpa_offset, attrs, data, size);
522 MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
523 unsigned size, MemTxAttrs attrs)
525 CXLType3Dev *ct3d = CXL_TYPE3(d);
526 uint64_t dpa_offset;
527 MemoryRegion *mr;
529 mr = host_memory_backend_get_memory(ct3d->hostmem);
530 if (!mr) {
531 return MEMTX_OK;
534 if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
535 return MEMTX_OK;
538 if (dpa_offset > int128_get64(mr->size)) {
539 return MEMTX_OK;
541 return address_space_write(&ct3d->hostmem_as, dpa_offset, attrs,
542 &data, size);
545 static void ct3d_reset(DeviceState *dev)
547 CXLType3Dev *ct3d = CXL_TYPE3(dev);
548 uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
549 uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;
551 cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
552 cxl_device_register_init_common(&ct3d->cxl_dstate);
static Property ct3_props[] = {
    /* Backing store for the device's persistent memory capacity */
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    /* Backing store for the Label Storage Area */
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    /* PCIe device serial number; UI64_NULL (all Fs) means "none" */
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    /* Optional file providing a CDAT table to expose via DOE */
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};
565 static uint64_t get_lsa_size(CXLType3Dev *ct3d)
567 MemoryRegion *mr;
569 mr = host_memory_backend_get_memory(ct3d->lsa);
570 return memory_region_size(mr);
/*
 * Abort on an out-of-bounds LSA access.  The second assert catches
 * unsigned wrap-around of offset + size (and the size == 0 case), which
 * could otherwise make the bounds check pass spuriously.
 */
static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}
580 static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
581 uint64_t offset)
583 MemoryRegion *mr;
584 void *lsa;
586 mr = host_memory_backend_get_memory(ct3d->lsa);
587 validate_lsa_access(mr, size, offset);
589 lsa = memory_region_get_ram_ptr(mr) + offset;
590 memcpy(buf, lsa, size);
592 return size;
/*
 * Write @size bytes from @buf into the Label Storage Area at @offset and
 * mark the range dirty so migration/persistence picks it up.
 */
static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * Just like the PMEM, if the guest is not allowed to exit gracefully, label
     * updates will get lost.
     */
}
/* QOM class init: wire up PCI callbacks, properties and LSA accessors. */
static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    /*
     * NOTE(review): ct3_realize() overwrites the class code with
     * PCI_CLASS_MEMORY_CXL, so this value only exists pre-realize.
     */
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    /* Intercept config space so the DOE mailbox can service accesses */
    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL PMEM Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    /* Label Storage Area hooks used by the mailbox command handlers */
    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
}
/* QOM type registration data for the CXL Type 3 device. */
static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};
653 static void ct3d_registers(void)
655 type_register_static(&ct3d_info);
658 type_init(ct3d_registers);