/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"
/* CXL r3.0 Section 8.2.4.19.1 CXL HDM Decoder Capability Register */
int cxl_decoder_count_enc(int count)
{
    switch (count) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 6: return 0x3;
    case 8: return 0x4;
    case 10: return 0x5;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 12: return 0x6;
    case 14: return 0x7;
    case 16: return 0x8;
    case 20: return 0x9;
    case 24: return 0xa;
    case 28: return 0xb;
    case 32: return 0xc;
    }
    return 0;
}
int cxl_decoder_count_dec(int enc_cnt)
{
    switch (enc_cnt) {
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 6;
    case 0x4: return 8;
    case 0x5: return 10;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 0x6: return 12;
    case 0x7: return 14;
    case 0x8: return 16;
    case 0x9: return 20;
    case 0xa: return 24;
    case 0xb: return 28;
    case 0xc: return 32;
    }
    return 0;
}
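/*
 * Illustrative round trip for the two helpers above:
 * cxl_decoder_count_enc(10) == 0x5 and cxl_decoder_count_dec(0x6) == 12,
 * so cxl_decoder_count_dec(cxl_decoder_count_enc(n)) == n for every count
 * in the encoding table.
 */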
hwaddr cxl_decode_ig(int ig)
{
    return 1ULL << (ig + 8);
}
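/*
 * Worked example: an encoded interleave granularity (ig) of 0 decodes to
 * 1ULL << 8 == 256 bytes, and ig == 6 decodes to 1ULL << 14 == 16384 bytes,
 * the inverse of cxl_interleave_granularity_enc() at the end of this file.
 */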
static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    switch (size) {
    case 4:
        if (cregs->special_ops && cregs->special_ops->read) {
            return cregs->special_ops->read(cxl_cstate, offset, 4);
        } else {
            QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
            return cregs->cache_mem_registers[offset / 4];
        }
    case 8:
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    default:
        /*
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();
    }
}
static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
                             uint32_t value)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
    case A_CXL_HDM_DECODER1_CTRL:
    case A_CXL_HDM_DECODER2_CTRL:
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        break;
    default:
        break;
    }

    if (should_commit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
    } else if (should_uncommit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
    }
    stl_le_p((uint8_t *)cache_mem + offset, value);
}
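/*
 * Example of the handler above in action: a guest write to
 * A_CXL_HDM_DECODER0_CTRL with COMMIT set is stored back with COMMITTED
 * forced to 1 and ERR cleared, so the commit appears to succeed
 * immediately; a write clearing COMMIT stores COMMITTED as 0. No real
 * decoder state machine is modelled, hence the name.
 */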
static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    switch (size) {
    case 4:
        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_regs_write_mask) != 4);
        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
        mask = cregs->cache_mem_regs_write_mask[offset / 4];
        value &= mask;
        /* RO bits should remain constant. Done by reading existing value */
        value |= ~mask & cregs->cache_mem_registers[offset / 4];
        if (cregs->special_ops && cregs->special_ops->write) {
            cregs->special_ops->write(cxl_cstate, offset, value, size);
            return;
        }

        if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
            offset <= A_CXL_HDM_DECODER3_TARGET_LIST_HI) {
            dumb_hdm_handler(cxl_cstate, offset, value);
        } else {
            cregs->cache_mem_registers[offset / 4] = value;
        }
        return;
    case 8:
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    default:
        /*
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();
    }
}
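/*
 * Worked example of the read-modify-write in the 4 byte case above: with
 * a write mask of 0x0000ffff and a current register value of 0xabcd0123,
 * a guest write of 0xffffffff stores 0xabcdffff; the read-only upper half
 * comes from the existing register contents, not the guest-supplied value.
 */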
/*
 * 8.2.3
 *   The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 *   Component Registers.
 *
 * 8.2.2
 *   • A 32 bit register shall be accessed as a 4 Byte quantity. Partial
 *   reads are not permitted.
 *   • A 64 bit register shall be accessed as an 8 Byte quantity. Partial
 *   reads are not permitted.
 *
 * As of the spec defined today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The io registers control the link, which QEMU doesn't model */
    memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}
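/*
 * Hypothetical caller sketch (the device type name is a placeholder, not
 * part of this file): a CXL PCI function would typically do, in its
 * realize hook,
 *
 *   cxl_component_register_block_init(OBJECT(pdev), cxl_cstate,
 *                                     TYPE_MY_CXL_DEVICE);
 *   pci_register_bar(pdev, CXL_COMPONENT_REG_BAR_IDX,
 *                    PCI_BASE_ADDRESS_SPACE_MEMORY |
 *                    PCI_BASE_ADDRESS_MEM_TYPE_64,
 *                    &cxl_cstate->crb.component_registers);
 */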
static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C but given bits are not yet set, it can
     * be handled as RO.
     */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff);
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    /* CXL switches and devices must set this */
    stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200);
}
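/*
 * Mask arithmetic for the values above: 0x1cfff == 0xfff | 0x1c000, i.e.
 * bits 0-11 plus bits 14-16, exactly the bits left defined once 12-13 and
 * 17-31 are reserved. 0x7f likewise covers the seven defined correctable
 * error bits.
 */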
static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
                            enum reg_type type)
{
    int decoder_count = CXL_HDM_DECODER_COUNT;
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
                     POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * hdm_inc] = 0x13ff;
        if (type == CXL2_DEVICE ||
            type == CXL2_TYPE3_DEVICE ||
            type == CXL2_LOGICAL_DEVICE) {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xf0000000;
        } else {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xffffffff;
        }
        write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * hdm_inc] = 0xffffffff;
    }
}
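/*
 * Layout example for the loop above: hdm_inc is the register stride
 * between decoders, so decoder i's control register sits at
 * R_CXL_HDM_DECODER0_CTRL + i * hdm_inc. A base/size low mask of
 * 0xf0000000 leaves only bits [31:28] writable, forcing decoder bases
 * and sizes to 256 MiB alignment.
 */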
void cxl_component_register_init_common(uint32_t *reg_state,
                                        uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such
     * that, with the ordering chosen here, a single number can be used to
     * define which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

#define init_cap_reg(reg, id, version)                                        \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,       \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR,  \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    init_cap_reg(RAS, 2, 2);
    ras_init_common(reg_state, write_msk);

    init_cap_reg(LINK, 4, 2);

    if (caps < 3) {
        return;
    }

    init_cap_reg(HDM, 5, 1);
    hdm_init_common(reg_state, write_msk, type);

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, 1);
    init_cap_reg(SNOOP, 8, 1);
}
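/*
 * Example of the caps scheme above: a CXL2_ROOT_PORT has caps == 5 and so
 * advertises all five headers (RAS, Link, HDM, Extended Security, Snoop),
 * whereas a CXL2_DOWNSTREAM_PORT (caps == 2) stops after RAS and Link.
 */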
/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the MCFG space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));
    /* Configure write masks */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Cntrl RW Lock - so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low is RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExt, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExt, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit) + 1] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high)] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 1] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 2] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 3] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high)] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 1] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 2] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 3] =
            0xFF;
        break;
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are rw1cs bits in the status register but never set */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}
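/*
 * Illustrative call (constants mirror those used elsewhere in QEMU's CXL
 * code; treat the exact names as an assumption here): a type 3 device
 * might add its device DVSEC with
 *
 *   cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
 *                              PCIE_CXL_DEVICE_DVSEC_LENGTH,
 *                              PCIE_CXL_DEVICE_DVSEC,
 *                              PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec_body);
 *
 * Successive calls pack DVSECs contiguously, since dvsec_offset advances
 * by length each time.
 */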
/* CXL r3.0 Section 8.2.4.19.7 CXL HDM Decoder n Control Register */
uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default:
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

int cxl_interleave_ways_dec(uint8_t iw_enc, Error **errp)
{
    switch (iw_enc) {
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 8;
    case 0x4: return 16;
    case 0x8: return 3;
    case 0x9: return 6;
    case 0xa: return 12;
    default:
        error_setg(errp, "Encoded interleave ways: %d not supported", iw_enc);
        return 0;
    }
}

uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}
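/*
 * Round-trip example tying the helpers together:
 * cxl_interleave_granularity_enc(16384) returns 6, and cxl_decode_ig(6)
 * returns 1ULL << 14 == 16384, so the two are inverses over the accepted
 * granularities.
 */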