/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"
/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in CXL r3.1 Section 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */
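/*
 * Those rules are modelled here via MemoryRegionOps: the .valid fields of
 * the structures below constrain what the guest may issue, while .impl
 * describes the access widths the read/write callbacks actually handle;
 * QEMU's memory core splits or combines accesses between the two as needed.
 */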
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 4:
        return cxl_dstate->caps_reg_state32[offset / size];
    case 8:
        return cxl_dstate->caps_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}
static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return 0;
    }

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        if (offset == A_CXL_DEV_BG_CMD_STS) {
            uint64_t bg_status_reg;
            /* Synthesise background command status from the CCI state */
            bg_status_reg = FIELD_DP64(0, CXL_DEV_BG_CMD_STS, OP,
                                       cci->bg.opcode);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       PERCENTAGE_COMP, cci->bg.complete_pct);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       RET_CODE, cci->bg.ret_code);
            cxl_dstate->mbox_reg_state64[offset / size] = bg_status_reg;
        }
        if (offset == A_CXL_DEV_MAILBOX_STS) {
            uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
            if (cci->bg.complete_pct) {
                status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
                                        0);
                cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
            }
        }
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}
static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}
static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        break;
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return;
    }

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing of the input and output payload buffers.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        pl_in_copy = g_memdup2(pl, len_in);
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}
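/*
 * For orientation, a rough host-side sketch of the handshake the function
 * above implements (mbox_read32/mbox_write32/mbox_write64 are illustrative
 * placeholder accessors, not QEMU or kernel APIs):
 *
 *     mbox_write64(A_CXL_DEV_MAILBOX_CMD, command set/opcode/input length);
 *     copy input payload to A_CXL_DEV_CMD_PAYLOAD;
 *     mbox_write32(A_CXL_DEV_MAILBOX_CTRL, DOORBELL);
 *     while (mbox_read32(A_CXL_DEV_MAILBOX_CTRL) & DOORBELL) { poll; }
 *     check the ERRNO field of mbox_read64(A_CXL_DEV_MAILBOX_STS);
 *     read LENGTH bytes of output payload from A_CXL_DEV_CMD_PAYLOAD;
 *
 * Commands in this emulation execute synchronously inside the doorbell
 * write, so the poll loop observes DOORBELL already cleared unless a
 * background command was started (BG_OP set in the status register).
 */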
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    return cxl_dstate->memdev_status;
}
static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        /* caps_reg_read() only implements 4 and 8 byte accesses */
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}
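/*
 * Resulting layout of the register block built above (offsets are the
 * macros used in the subregion mappings):
 *
 *     0x0                                   capability array
 *     CXL_DEVICE_STATUS_REGISTERS_OFFSET    device status registers
 *     CXL_MAILBOX_REGISTERS_OFFSET          mailbox registers
 *     CXL_MEMORY_DEVICE_REGISTERS_OFFSET    memory device status
 */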
void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}
static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    const uint8_t msi_n = 9;

    /* 2048 payload size */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
    /* irq support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     BG_INT_CAP, 1);
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MSI_N, msi_n);
    cxl_dstate->mbox_msi_n = msi_n;
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MBOX_READY_TIME, 0); /* Not reported */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     TYPE, 0); /* Inferred from class code */
}
static void memdev_reg_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t memdev_status_reg;

    memdev_status_reg = FIELD_DP64(0, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    memdev_status_reg = FIELD_DP64(memdev_status_reg, CXL_MEM_DEV_STS,
                                   MBOX_READY, 1);
    cxl_dstate->memdev_status = memdev_status_reg;
}
void cxl_device_register_init_t3(CXLType3Dev *ct3d)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1,
                        CXL_DEVICE_STATUS_VERSION);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000,
                        CXL_MEM_DEV_STATUS_VERSION);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}
void cxl_device_register_init_swcci(CSWMBCCIDev *sw)
{
    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);
}
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}
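/*
 * Worked example with illustrative numbers: if the host last set the
 * timestamp to 1000000 ns (host_set == 1000000) and 500 ns of
 * QEMU_CLOCK_VIRTUAL time have elapsed since last_set, a read returns
 * 1000000 + 500 ns. If the host never set the timestamp, 0 is returned
 * per the default above.
 */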
;