/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

/* CXL r3.0 Section 8.2.4.19.1 CXL HDM Decoder Capability Register */
int cxl_decoder_count_enc(int count)
{
    switch (count) {
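    /* Encodings reconstructed from the CXL r3.0 section cited above (assumed) */
    case 1: return 0;
    case 2: return 1;
    case 4: return 2;
    case 6: return 3;
    case 8: return 4;
    case 10: return 5;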
    /* Switches and Host Bridges may have more than 10 decoders */
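    /* Remaining encodings and fallback reconstructed (assumed) */
    case 12: return 6;
    case 14: return 7;
    case 16: return 8;
    case 20: return 9;
    case 24: return 10;
    case 28: return 11;
    case 32: return 12;
    }
    return 0;
}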

int cxl_decoder_count_dec(int enc_cnt)
{
    switch (enc_cnt) {
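    /* Decode table reconstructed as the inverse of cxl_decoder_count_enc() (assumed) */
    case 0: return 1;
    case 1: return 2;
    case 2: return 4;
    case 3: return 6;
    case 4: return 8;
    case 5: return 10;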
    /* Switches and Host Bridges may have more than 10 decoders */
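    /* Remaining decodings and fallback reconstructed (assumed) */
    case 6: return 12;
    case 7: return 14;
    case 8: return 16;
    case 9: return 20;
    case 10: return 24;
    case 11: return 28;
    case 12: return 32;
    }
    return 0;
}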

/* Interleave granularity encoding: 0 means 256 bytes, each step doubles it */
hwaddr cxl_decode_ig(int ig)
{
    return 1ULL << (ig + 8);
}

static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    }

    if (cregs->special_ops && cregs->special_ops->read) {
        return cregs->special_ops->read(cxl_cstate, offset, size);
    } else {
        return cregs->cache_mem_registers[offset /
                                          sizeof(*cregs->cache_mem_registers)];
    }
}

/*
 * A "dumb" handler: commit and uncommit requests take effect immediately and
 * never fail, so COMMITTED simply follows COMMIT and ERR is always cleared.
 */
static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
                             uint32_t value)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
    case A_CXL_HDM_DECODER1_CTRL:
    case A_CXL_HDM_DECODER2_CTRL:
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        break;
    default:
        break;
    }

    if (should_commit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
    } else if (should_uncommit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
    }
    stl_le_p((uint8_t *)cache_mem + offset, value);
}

static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    }
    mask = cregs->cache_mem_regs_write_mask[offset /
                                            sizeof(*cregs->cache_mem_regs_write_mask)];
    value &= mask;
    /* RO bits should remain constant. Done by reading the existing value */
    value |= ~mask & cregs->cache_mem_registers[offset /
                                                sizeof(*cregs->cache_mem_registers)];
    if (cregs->special_ops && cregs->special_ops->write) {
        cregs->special_ops->write(cxl_cstate, offset, value, size);
        return;
    }

    if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
        offset <= A_CXL_HDM_DECODER3_TARGET_LIST_HI) {
        dumb_hdm_handler(cxl_cstate, offset, value);
    } else {
        cregs->cache_mem_registers[offset /
                                   sizeof(*cregs->cache_mem_registers)] = value;
    }
}

/*
 * The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 * Component Registers.
 *
 * • A 32 bit register shall be accessed as a 4 Byte quantity. Partial
 *   reads are not permitted.
 * • A 64 bit register shall be accessed as an 8 Byte quantity. Partial
 *   reads are not permitted.
 *
 * As the spec is defined today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    /* .valid/.impl split reconstructed from the duplicated 4/8 limits (assumed) */
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The .io registers control the link, which QEMU does not model */
    memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}

static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C but, given the bits are not yet set, it can
     * be handled as RO for now.
     */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff);
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    /* CXL switches and devices must set this bit */
    stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200);
}

static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
                            enum reg_type type)
{
    int decoder_count = CXL_HDM_DECODER_COUNT;
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * hdm_inc] = 0x13ff;
        if (type == CXL2_DEVICE ||
            type == CXL2_TYPE3_DEVICE ||
            type == CXL2_LOGICAL_DEVICE) {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xf0000000; /* assumed mask; low bits not writable for devices */
        } else {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xffffffff;
        }
        write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * hdm_inc] = 0xffffffff;
    }
}

void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such
     * that, with the ordering chosen here, a single number can be used to
     * define which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
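    case CXL2_DEVICE:
        /* RAS, Link (capability counts reconstructed, assumed) */
        caps = 2;
        break;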
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
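        /* + HDM (capability count reconstructed, assumed) */
        caps = 3;
        break;
    case CXL2_ROOT_PORT: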
        /* + Extended Security, + Snoop */
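        caps = 5;
        break;
    default:
        /* Fallback reconstructed (assumed): an unknown component type is a bug */
        abort();
    }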

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

#define init_cap_reg(reg, id, version)                                        \
    QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0);                     \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,      \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR, \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    init_cap_reg(RAS, 2, 2);
    ras_init_common(reg_state, write_msk);

    init_cap_reg(LINK, 4, 2);

    /* Capability gating reconstructed (assumed): components with caps < 3 stop here */
    if (caps < 3) {
        return;
    }

    init_cap_reg(HDM, 5, 1);
    hdm_init_common(reg_state, write_msk, type);

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, 1);
    init_cap_reg(SNOOP, 8, 1);
}

/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the PCIe extended configuration space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

    /* Configure write masks */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Ctrl is RW Lock - so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low are RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF;
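        break;
    /* Case label reconstructed (assumed) to match the CXLDVSECPortGPF masks below */
    case GPF_PORT_DVSEC: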
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
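        /* Arm reconstructed (assumed): root ports are not MLD capable, so bit 6 stays RO */
        case CXL2_ROOT_PORT:
            /* No MLD */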
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are RW1CS bits in the status register but they are never set currently */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}

/* CXL r3.0 Section 8.2.4.19.7 CXL HDM Decoder n Control Register */
uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
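    /* Encodings reconstructed from the spec section cited above (assumed) */
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default: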
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

int cxl_interleave_ways_dec(uint8_t iw_enc, Error **errp)
{
    switch (iw_enc) {
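    /* Inverse of cxl_interleave_ways_enc(); table reconstructed (assumed) */
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 8;
    case 0x4: return 16;
    case 0x8: return 3;
    case 0x9: return 6;
    case 0xa: return 12;
    default: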
        error_setg(errp, "Encoded interleave ways: %d not supported", iw_enc);
        return 0;
    }
}

uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
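    /* Powers of two from 256 bytes up; table reconstructed (assumed) */
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;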
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}