/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in CXL r3.1 Section 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   register defined in this section. Unless otherwise specified, software
 *   shall restrict the accesses width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */

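/*
 * The capability, device status, mailbox and memory device register state
 * below is backed by byte arrays that cxl_device.h also exposes as aliased
 * 16/32/64-bit views, so dividing the byte offset by the access size gives
 * the index of the naturally aligned element of that width.
 */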
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 4:
        return cxl_dstate->caps_reg_state32[offset / size];
    case 8:
        return cxl_dstate->caps_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

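/*
 * A mailbox CCI may belong to either a Type 3 memory device or a switch
 * mailbox CCI device, so the owning CXLDeviceState has to be resolved from
 * the CCI's interface object first. For 64-bit reads, the background command
 * status and mailbox status registers are refreshed from the CCI's
 * background-command tracking state before being returned.
 */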
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return 0;
    }

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        if (offset == A_CXL_DEV_BG_CMD_STS) {
            uint64_t bg_status_reg;
            bg_status_reg = FIELD_DP64(0, CXL_DEV_BG_CMD_STS, OP,
                                       cci->bg.opcode);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       PERCENTAGE_COMP, cci->bg.complete_pct);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       RET_CODE, cci->bg.ret_code);
            /* endian? */
            cxl_dstate->mbox_reg_state64[offset / size] = bg_status_reg;
        }
        if (offset == A_CXL_DEV_MAILBOX_STS) {
            uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
            if (cci->bg.complete_pct) {
                status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
                                        0);
                cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
            }
        }
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

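/*
 * 32-bit writes are only accepted for the recognised mailbox control and
 * capability offsets; anything else is logged as unimplemented and the
 * write is ignored (WI).
 */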
static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        break;
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

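/*
 * Mailbox protocol (CXL r3.1 Section 8.2.8.4): the host writes the command
 * register and any input payload, then sets the doorbell bit in the control
 * register. This model dispatches the command synchronously via
 * cxl_process_cci_message(), writes the results back into the command and
 * status registers, and clears the doorbell to signal completion. A typical
 * guest-side sequence looks roughly like (sketch, not driver code):
 *
 *   write CXL_DEV_MAILBOX_CMD    opcode and input payload length
 *   write CXL_DEV_CMD_PAYLOAD    input payload bytes, if any
 *   set   DOORBELL in CXL_DEV_MAILBOX_CTRL
 *   poll  DOORBELL until it reads clear
 *   read  CXL_DEV_MAILBOX_STS    return code
 *   read  CXL_DEV_CMD_PAYLOAD    output payload, length from MAILBOX_CMD
 */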
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return;
    }

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        pl_in_copy = g_memdup2(pl, len_in);
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}

static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    return cxl_dstate->memdev_status;
}

static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

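/*
 * For each register block, .valid constrains what the guest may issue
 * (1-8 byte, naturally aligned accesses) while .impl describes the access
 * sizes the handlers actually implement; the memory core splits or combines
 * guest accesses to match. The memory device status block, for example, is
 * implemented as a single 8-byte register.
 */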
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

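/*
 * Assemble the device register block exposed through a PCI BAR: the
 * capability array header at offset 0, followed by the device status,
 * mailbox and memory device register ranges at their architected offsets.
 */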
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}

void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}

static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}

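/*
 * The mailbox capability register encodes the payload size as a power of
 * two: a stored value of n means 2^n bytes, so CXL_MAILBOX_PAYLOAD_SHIFT
 * (11) advertises the 2048-byte payload area set up below.
 */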
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    const uint8_t msi_n = 9;

    /* 2048 payload size */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
    /* irq support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     BG_INT_CAP, 1);
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MSI_N, msi_n);
    cxl_dstate->mbox_msi_n = msi_n;
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MBOX_READY_TIME, 0); /* Not reported */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     TYPE, 0); /* Inferred from class code */
}

static void memdev_reg_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t memdev_status_reg;

    memdev_status_reg = FIELD_DP64(0, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    memdev_status_reg = FIELD_DP64(memdev_status_reg, CXL_MEM_DEV_STS,
                                   MBOX_READY, 1);
    cxl_dstate->memdev_status = memdev_status_reg;
}

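/*
 * Set up the Type 3 device's capability array header and the three
 * capabilities it advertises (device status, mailbox, memory device
 * status), then initialize the primary mailbox CCI with the Type 3
 * command set.
 */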
void cxl_device_register_init_t3(CXLType3Dev *ct3d)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1,
                        CXL_DEVICE_STATUS_VERSION);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000,
                        CXL_MEM_DEV_STATUS_VERSION);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}

void cxl_device_register_init_swcci(CSWMBCCIDev *sw)
{
    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);
}

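/*
 * The device timestamp only ticks once the host has set it: report the
 * host-provided base plus the QEMU_CLOCK_VIRTUAL time elapsed since it was
 * last set, or 0 if it has never been set.
 */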
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}