semihosting: Split out common-semi-target.h
[qemu/rayw.git] / hw / cxl / cxl-device-utils.c
blob687759b3017b348d7176dc84e96b208a45226416
1 /*
2 * CXL Utility library for devices
4 * Copyright(C) 2020 Intel Corporation.
6 * This work is licensed under the terms of the GNU GPL, version 2. See the
7 * COPYING file in the top-level directory.
8 */
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "hw/cxl/cxl.h"
/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   register defined in this section. Unless otherwise specified, software
 *   shall restrict the accesses width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */
31 static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
33 CXLDeviceState *cxl_dstate = opaque;
35 if (size == 4) {
36 return cxl_dstate->caps_reg_state32[offset / sizeof(*cxl_dstate->caps_reg_state32)];
37 } else {
38 return cxl_dstate->caps_reg_state64[offset / sizeof(*cxl_dstate->caps_reg_state64)];
/*
 * Device status register reads: no status state is modelled here, so
 * every read returns zero regardless of offset or size.
 */
static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    return 0;
}
47 static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
49 CXLDeviceState *cxl_dstate = opaque;
51 switch (size) {
52 case 1:
53 return cxl_dstate->mbox_reg_state[offset];
54 case 2:
55 return cxl_dstate->mbox_reg_state16[offset / size];
56 case 4:
57 return cxl_dstate->mbox_reg_state32[offset / size];
58 case 8:
59 return cxl_dstate->mbox_reg_state64[offset / size];
60 default:
61 g_assert_not_reached();
65 static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
66 uint64_t value)
68 switch (offset) {
69 case A_CXL_DEV_MAILBOX_CTRL:
70 /* fallthrough */
71 case A_CXL_DEV_MAILBOX_CAP:
72 /* RO register */
73 break;
74 default:
75 qemu_log_mask(LOG_UNIMP,
76 "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
77 __func__, offset);
78 return;
81 reg_state[offset / sizeof(*reg_state)] = value;
84 static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
85 uint64_t value)
87 switch (offset) {
88 case A_CXL_DEV_MAILBOX_CMD:
89 break;
90 case A_CXL_DEV_BG_CMD_STS:
91 /* BG not supported */
92 /* fallthrough */
93 case A_CXL_DEV_MAILBOX_STS:
94 /* Read only register, will get updated by the state machine */
95 return;
96 default:
97 qemu_log_mask(LOG_UNIMP,
98 "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
99 __func__, offset);
100 return;
104 reg_state[offset / sizeof(*reg_state)] = value;
107 static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
108 unsigned size)
110 CXLDeviceState *cxl_dstate = opaque;
112 if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
113 memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
114 return;
117 switch (size) {
118 case 4:
119 mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
120 break;
121 case 8:
122 mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
123 break;
124 default:
125 g_assert_not_reached();
128 if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
129 DOORBELL)) {
130 cxl_process_mailbox(cxl_dstate);
134 static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
136 uint64_t retval = 0;
138 retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
139 retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);
141 return retval;
144 static const MemoryRegionOps mdev_ops = {
145 .read = mdev_reg_read,
146 .write = NULL, /* memory device register is read only */
147 .endianness = DEVICE_LITTLE_ENDIAN,
148 .valid = {
149 .min_access_size = 1,
150 .max_access_size = 8,
151 .unaligned = false,
153 .impl = {
154 .min_access_size = 8,
155 .max_access_size = 8,
159 static const MemoryRegionOps mailbox_ops = {
160 .read = mailbox_reg_read,
161 .write = mailbox_reg_write,
162 .endianness = DEVICE_LITTLE_ENDIAN,
163 .valid = {
164 .min_access_size = 1,
165 .max_access_size = 8,
166 .unaligned = false,
168 .impl = {
169 .min_access_size = 1,
170 .max_access_size = 8,
174 static const MemoryRegionOps dev_ops = {
175 .read = dev_reg_read,
176 .write = NULL, /* status register is read only */
177 .endianness = DEVICE_LITTLE_ENDIAN,
178 .valid = {
179 .min_access_size = 1,
180 .max_access_size = 8,
181 .unaligned = false,
183 .impl = {
184 .min_access_size = 1,
185 .max_access_size = 8,
189 static const MemoryRegionOps caps_ops = {
190 .read = caps_reg_read,
191 .write = NULL, /* caps registers are read only */
192 .endianness = DEVICE_LITTLE_ENDIAN,
193 .valid = {
194 .min_access_size = 1,
195 .max_access_size = 8,
196 .unaligned = false,
198 .impl = {
199 .min_access_size = 4,
200 .max_access_size = 8,
/*
 * Build the MMIO layout for the CXL device register block.
 *
 * Creates a container region sized to the next power of two of
 * CXL_MMIO_SIZE (it will be exposed as a PCI BAR), then creates and
 * maps one IO subregion per register group: capability headers, device
 * status, mailbox, and memory device status, each at its
 * spec-defined offset within the container.
 */
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cxl_dstate,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    /* Map each register group at its offset inside the BAR container. */
    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}
233 static void device_reg_init_common(CXLDeviceState *cxl_dstate) { }
/*
 * Initialise the mailbox capability register and cached payload size.
 * NOTE(review): the PAYLOAD_SIZE field is written with the *_SHIFT
 * constant — presumably the field encodes log2 of the byte size per the
 * CXL spec; confirm against the register definition.
 */
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    /* 2048 payload size, with no interrupt or background support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
}
243 static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }
245 void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
247 uint64_t *cap_hdrs = cxl_dstate->caps_reg_state64;
248 const int cap_count = 3;
250 /* CXL Device Capabilities Array Register */
251 ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
252 ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
253 ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);
255 cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1);
256 device_reg_init_common(cxl_dstate);
258 cxl_device_cap_init(cxl_dstate, MAILBOX, 2);
259 mailbox_reg_init_common(cxl_dstate);
261 cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000);
262 memdev_reg_init_common(cxl_dstate);
264 assert(cxl_initialize_mailbox(cxl_dstate) == 0);