hw/cxl/cxl-device-utils.c
/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"
/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */
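
/*
 * Worked example of the rules above (editorial, not from the spec text):
 * for a 64-bit register at offset 0x8, 4-byte reads at 0x8 or 0xc and an
 * 8-byte read at 0x8 are all legal; a 4-byte read at 0x6 is not, since the
 * address is not a multiple of the access width.
 */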
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    if (size == 4) {
        return cxl_dstate->caps_reg_state32[offset / sizeof(*cxl_dstate->caps_reg_state32)];
    } else {
        return cxl_dstate->caps_reg_state64[offset / sizeof(*cxl_dstate->caps_reg_state64)];
    }
}
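
/*
 * Editorial note: only the 4- and 8-byte cases are needed here because
 * caps_ops below sets .impl.min_access_size = 4, so the memory core
 * synthesizes 1- and 2-byte guest reads from a 4-byte implementation read.
 */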
static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}
static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}
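
/*
 * Editorial note: the CTRL and CAP cases break out to the final store, so a
 * write to A_CXL_DEV_MAILBOX_CTRL does land in reg_state; presumably this is
 * so the DOORBELL bit is latched before mailbox_reg_write() below checks it.
 */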
static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        /* BG not supported */
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        cxl_process_mailbox(cxl_dstate);
    }
}
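
/*
 * Illustrative guest-side sequence (an editorial sketch; the register
 * offsets are real, the driver logic is hypothetical):
 *
 *   write payload to  mbox base + A_CXL_DEV_CMD_PAYLOAD
 *   write command to  mbox base + A_CXL_DEV_MAILBOX_CMD   (64-bit)
 *   set DOORBELL in   mbox base + A_CXL_DEV_MAILBOX_CTRL  (32-bit)
 *   poll              mbox base + A_CXL_DEV_MAILBOX_STS   for completion
 *
 * In this emulation the doorbell write triggers cxl_process_mailbox()
 * synchronously, so the status is already updated when the write completes.
 */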
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    uint64_t retval = 0;

    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);

    return retval;
}
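
/*
 * Editorial note: the memory device status register is synthesized on every
 * read rather than stored; this model unconditionally reports the media as
 * ready and the mailbox as available.
 */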
static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
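
/*
 * Editorial note: .valid describes what the guest may issue (1-8 bytes,
 * aligned), while .impl describes what the callbacks understand. With
 * .impl.min_access_size = 8 above, a 4-byte guest read of the status
 * register is implemented as an 8-byte mdev_reg_read() from which the
 * memory core extracts the requested bytes.
 */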
static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cxl_dstate,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}
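
/*
 * Usage sketch (editorial, assuming a PCI function; the BAR number is
 * hypothetical):
 *
 *   cxl_device_register_block_init(OBJECT(dev), cxl_dstate);
 *   cxl_device_register_init_common(cxl_dstate);
 *   pci_register_bar(pci_dev, 2,
 *                    PCI_BASE_ADDRESS_SPACE_MEMORY |
 *                    PCI_BASE_ADDRESS_MEM_TYPE_64,
 *                    &cxl_dstate->device_registers);
 */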
void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}
static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    /* 2048 payload size, with no interrupt or background support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
}
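
/*
 * Editorial note: the PAYLOAD_SIZE capability field holds log2 of the byte
 * size, so with a shift of 11 (assumed from the 2048 figure above),
 * 1 << CXL_MAILBOX_PAYLOAD_SHIFT == 2048 == CXL_MAILBOX_MAX_PAYLOAD_SIZE.
 */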
static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }
void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox(cxl_dstate);
}
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}
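
/*
 * Worked example (editorial): if the host set the timestamp to 1000 ns when
 * QEMU_CLOCK_VIRTUAL read 40 ns, a call when the clock reads 100 ns returns
 * 1000 + (100 - 40) = 1060 ns. If the host never set the timestamp,
 * timestamp.set is false and 0 is returned.
 */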