hw/cxl/cxl-component-utils.c
/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

/* CXL r3.0 Section 8.2.4.19.1 CXL HDM Decoder Capability Register */
int cxl_decoder_count_enc(int count)
{
    switch (count) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 6: return 0x3;
    case 8: return 0x4;
    case 10: return 0x5;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 12: return 0x6;
    case 14: return 0x7;
    case 16: return 0x8;
    case 20: return 0x9;
    case 24: return 0xa;
    case 28: return 0xb;
    case 32: return 0xc;
    }
    return 0;
}

int cxl_decoder_count_dec(int enc_cnt)
{
    switch (enc_cnt) {
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 6;
    case 0x4: return 8;
    case 0x5: return 10;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 0x6: return 12;
    case 0x7: return 14;
    case 0x8: return 16;
    case 0x9: return 20;
    case 0xa: return 24;
    case 0xb: return 28;
    case 0xc: return 32;
    }
    return 0;
}

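/*
 * Interleave granularity decode: an encoding of 0 corresponds to 256 bytes
 * and each increment doubles the granularity, i.e. bytes = 1 << (ig + 8).
 * This is the inverse of cxl_interleave_granularity_enc() further down.
 */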
hwaddr cxl_decode_ig(int ig)
{
    return 1ULL << (ig + 8);
}

static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    }

    if (cregs->special_ops && cregs->special_ops->read) {
        return cregs->special_ops->read(cxl_cstate, offset, size);
    } else {
        return cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    }
}

static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
                             uint32_t value)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
    case A_CXL_HDM_DECODER1_CTRL:
    case A_CXL_HDM_DECODER2_CTRL:
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        break;
    default:
        break;
    }

    if (should_commit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
    } else if (should_uncommit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
    }
    stl_le_p((uint8_t *)cache_mem + offset, value);
}

static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    }
    mask = cregs->cache_mem_regs_write_mask[offset / sizeof(*cregs->cache_mem_regs_write_mask)];
    value &= mask;
    /* RO bits should remain constant. Done by reading existing value */
    value |= ~mask & cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    if (cregs->special_ops && cregs->special_ops->write) {
        cregs->special_ops->write(cxl_cstate, offset, value, size);
        return;
    }

    if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
        offset <= A_CXL_HDM_DECODER3_TARGET_LIST_HI) {
        dumb_hdm_handler(cxl_cstate, offset, value);
    } else {
        cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)] = value;
    }
}

/*
 * 8.2.3
 *   The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 *   Component Registers.
 *
 * 8.2.2
 *   • A 32 bit register shall be accessed as a 4 Bytes quantity. Partial
 *   reads are not permitted.
 *   • A 64 bit register shall be accessed as an 8 Bytes quantity. Partial
 *   reads are not permitted.
 *
 * As the spec stands today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The .io registers control the link, which we don't care about in QEMU */
    memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}

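/*
 * Illustrative caller sketch (not part of this file): a CXL PCI device would
 * typically expose the block built above through a 64-bit memory BAR from its
 * realize function. The BAR index 0 below is only an example; each device
 * chooses its own.
 *
 *     cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
 *                                       object_get_typename(OBJECT(pci_dev)));
 *     pci_register_bar(pci_dev, 0,
 *                      PCI_BASE_ADDRESS_SPACE_MEMORY |
 *                      PCI_BASE_ADDRESS_MEM_TYPE_64,
 *                      &cxl_cstate->crb.component_registers);
 */
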
static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C, but since no bits are ever set yet, it can
     * be handled as RO.
     */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff);
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
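    /* 0x1cfff == bits [11:0] | bits [16:14], i.e. every non-reserved bit */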
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    /* CXL switches and devices must set this */
    stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200);
}

static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
                            enum reg_type type)
{
    int decoder_count = CXL_HDM_DECODER_COUNT;
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * hdm_inc] = 0x13ff;
        if (type == CXL2_DEVICE ||
            type == CXL2_TYPE3_DEVICE ||
            type == CXL2_LOGICAL_DEVICE) {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xf0000000;
        } else {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xffffffff;
        }
        write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * hdm_inc] = 0xffffffff;
    }
}

void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such
     * that, with the ordering chosen here, a single number can be used to
     * define which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

#define init_cap_reg(reg, id, version)                                        \
    QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0);                     \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,       \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR,  \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    init_cap_reg(RAS, 2, 2);
    ras_init_common(reg_state, write_msk);

    init_cap_reg(LINK, 4, 2);

    if (caps < 3) {
        return;
    }

    init_cap_reg(HDM, 5, 1);
    hdm_init_common(reg_state, write_msk, type);

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, 1);
    init_cap_reg(SNOOP, 8, 1);

#undef init_cap_reg
}

/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the MCFG space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

    /* Configure write masks */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Cntrl RW Lock - so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low is RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF;
        break;
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are rw1cs bits in the status register but never set currently */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}

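/*
 * Illustrative caller sketch (not taken from this file): a device fills a
 * body structure matching the DVSEC it wants and lets the helper above emit
 * the header and copy the body. The field initializers and revision value
 * below are placeholders only.
 *
 *     CXLDVSECDevice dvsec = {
 *         .ctrl = ...,
 *         .range1_base_lo = ...,
 *     };
 *     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE, sizeof(dvsec),
 *                                PCIE_CXL_DEVICE_DVSEC, rev,
 *                                (uint8_t *)&dvsec);
 *
 * cxl->dvsec_offset must already point at unused extended config space; it is
 * advanced past the new DVSEC so that further DVSECs can follow.
 */
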
/* CXL r3.0 Section 8.2.4.19.7 CXL HDM Decoder n Control Register */
uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default:
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

int cxl_interleave_ways_dec(uint8_t iw_enc, Error **errp)
{
    switch (iw_enc) {
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 8;
    case 0x4: return 16;
    case 0x8: return 3;
    case 0x9: return 6;
    case 0xa: return 12;
    default:
        error_setg(errp, "Encoded interleave ways: %d not supported", iw_enc);
        return 0;
    }
}

uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}