/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */
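
/*
 * The MemoryRegionOps later in this file encode these rules: the .valid
 * constraints describe which guest accesses are acceptable (1 to 8 bytes,
 * naturally aligned), while the .impl constraints describe which access
 * sizes the read/write callbacks themselves implement, letting the memory
 * core split or widen guest accesses as needed.
 */
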
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 4:
        return cxl_dstate->caps_reg_state32[offset / size];
    case 8:
        return cxl_dstate->caps_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

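/*
 * The mailbox is fronted by a CCI (Command and Control Interface) that may
 * belong either to a Type 3 memory device or to a switch mailbox CCI device,
 * so accesses first resolve which device's register state backs this CCI.
 * The 64-bit background-command and mailbox status registers are refreshed
 * from the CCI's background-operation state on each read.
 */
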
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return 0;
    }

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        if (offset == A_CXL_DEV_BG_CMD_STS) {
            uint64_t bg_status_reg;
            bg_status_reg = FIELD_DP64(0, CXL_DEV_BG_CMD_STS, OP,
                                       cci->bg.opcode);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       PERCENTAGE_COMP, cci->bg.complete_pct);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       RET_CODE, cci->bg.ret_code);
            /* endian? */
            cxl_dstate->mbox_reg_state64[offset / size] = bg_status_reg;
        }
        if (offset == A_CXL_DEV_MAILBOX_STS) {
            uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
            if (cci->bg.complete_pct) {
                status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
                                        0);
                cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
            }
        }
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

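/*
 * The write helpers below whitelist which mailbox registers may be written
 * at each access width; writes to anything else are logged as unimplemented
 * and ignored (WI).
 */
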
static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        break;
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

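/*
 * Writes drive the CXL mailbox protocol. A rough sketch of the guest-side
 * sequence this models (a simplification; register and field names are the
 * ones used in this file):
 *
 *   write CXL_DEV_MAILBOX_CMD        opcode, command set, input length
 *   write CXL_DEV_CMD_PAYLOAD        input payload bytes, if any
 *   set   CXL_DEV_MAILBOX_CTRL.DOORBELL
 *   poll  CXL_DEV_MAILBOX_CTRL.DOORBELL until it reads 0
 *   read  CXL_DEV_MAILBOX_STS.ERRNO and, on success, CXL_DEV_CMD_PAYLOAD
 *
 * Because this is emulation, the command is processed synchronously inside
 * the doorbell write below, unless the handler kicks off a background
 * operation.
 */
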
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return;
    }

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        pl_in_copy = g_memdup2(pl, len_in);
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}

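/*
 * The memory device status register is synthesized on every read rather
 * than backed by stored state: media status is always reported as ready
 * and the mailbox interface as available.
 */
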
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    uint64_t retval = 0;

    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);

    return retval;
}

static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

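/*
 * Layout of the device-registers memory region built below (offset and
 * length macros come from the CXL device header):
 *
 *   0x0                                  capability array
 *   CXL_DEVICE_STATUS_REGISTERS_OFFSET   device status registers
 *   CXL_MAILBOX_REGISTERS_OFFSET         mailbox registers
 *   CXL_MEMORY_DEVICE_REGISTERS_OFFSET   memory device status register
 */
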
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}

void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}

static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}

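/*
 * The mailbox capability register encodes the payload size as a power of
 * two, so the field is written with CXL_MAILBOX_PAYLOAD_SHIFT rather than
 * the byte count; for the 2048 byte payload used here the shift is 11,
 * since 1 << 11 == 2048.
 */
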
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    const uint8_t msi_n = 9;

    /* 2048 byte payload size */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
    /* irq support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     BG_INT_CAP, 1);
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MSI_N, msi_n);
    cxl_dstate->mbox_msi_n = msi_n;
}

static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }

void cxl_device_register_init_t3(CXLType3Dev *ct3d)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}

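/*
 * The switch mailbox CCI variant below advertises the same three
 * capabilities as the Type 3 device above but does not call
 * cxl_initialize_mailbox_t3(); the switch CCI's command set is expected to
 * be initialized from the switch mailbox device's own setup code instead.
 */
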
void cxl_device_register_init_swcci(CSWMBCCIDev *sw)
{
    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);
}

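/*
 * Timestamp handling backs the Get/Set Timestamp commands: the device
 * records the host-provided time and the virtual clock value at the moment
 * it was set, and a read returns that base plus the nanoseconds elapsed
 * since. If the host has never set the timestamp, 0 is returned.
 */
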
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}