hw/cxl/cxl-mailbox-utils.c

/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/pci/pci.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)

/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *        #define BAR 0
 *  2. Implement the handler
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *     { ... return CXL_MBOX_SUCCESS; }
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *
 * Writing the handler:
 *    The handler is given the &struct cxl_cmd, the &CXLDeviceState, and the
 *    in/out length of the payload. It is responsible for consuming the input
 *    payload from cmd->payload and operating on it as necessary. It must then
 *    fill the output data into cmd->payload (overwriting what was there),
 *    set the output length, and return a valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
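
/*
 * Purely illustrative sketch: FOO, BAR and cmd_foo_bar are hypothetical
 * names, not commands implemented in this file. A minimal handler that
 * returns a fixed 4-byte response would look roughly like:
 *
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *     {
 *         uint32_t *out = (uint32_t *)cmd->payload;
 *
 *         *out = 0;             // output overwrites the input payload
 *         *len = sizeof(*out);  // report the output payload length
 *         return CXL_MBOX_SUCCESS;
 *     }
 *
 * and would be registered as:
 *
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, 0, 0 },
 */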

enum {
    EVENTS                  = 0x01,
        #define GET_RECORDS            0x0
        #define CLEAR_RECORDS          0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE         = 0x02,
        #define GET_INFO               0x0
    TIMESTAMP               = 0x03,
        #define GET                    0x0
        #define SET                    0x1
    LOGS                    = 0x04,
        #define GET_SUPPORTED          0x0
        #define GET_LOG                0x1
    IDENTIFY                = 0x40,
        #define MEMORY_DEVICE          0x0
    CCLS                    = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA                0x2
        #define SET_LSA                0x3
    MEDIA_AND_POISON        = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
};

struct cxl_cmd;
typedef CXLRetCode (*opcode_handler)(struct cxl_cmd *cmd,
                                     CXLDeviceState *cxl_dstate, uint16_t *len);
struct cxl_cmd {
    const char *name;
    opcode_handler handler;
    ssize_t in;
    uint16_t effect; /* Reported in CEL */
    uint8_t *payload;
};
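
/*
 * Get Event Records: read the requested log type from the first byte of the
 * input payload and fill the output with as many pending records as fit in
 * the mailbox payload (capped at 0xFFFF).
 */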
static CXLRetCode cmd_events_get_records(struct cxl_cmd *cmd,
                                         CXLDeviceState *cxlds,
                                         uint16_t *len)
{
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = *((uint8_t *)cmd->payload);

    pl = (CXLGetEventPayload *)cmd->payload;
    memset(pl, 0, sizeof(*pl));

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
                CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len);
}
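
/*
 * Clear Event Records: hand the clear request straight to the event code;
 * no output payload is returned.
 */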
static CXLRetCode cmd_events_clear_records(struct cxl_cmd *cmd,
                                           CXLDeviceState *cxlds,
                                           uint16_t *len)
{
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)cmd->payload;
    *len = 0;
    return cxl_event_clear_records(cxlds, pl);
}
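
/*
 * Get Event Interrupt Policy: report, for each event log, whether MSI/MSI-X
 * delivery is enabled and on which vector.
 */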
static CXLRetCode cmd_events_get_interrupt_policy(struct cxl_cmd *cmd,
                                                  CXLDeviceState *cxlds,
                                                  uint16_t *len)
{
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)cmd->payload;
    memset(policy, 0, sizeof(*policy));

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}
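
/*
 * Set Event Interrupt Policy: enable interrupts for each log whose setting
 * selects MSI/MSI-X; the Dynamic Capacity field is optional and only parsed
 * if the payload is long enough to contain it.
 */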
static CXLRetCode cmd_events_set_interrupt_policy(struct cxl_cmd *cmd,
                                                  CXLDeviceState *cxlds,
                                                  uint16_t *len)
{
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (*len < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)cmd->payload;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (*len < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    *len = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.2.1 */
static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd,
                                               CXLDeviceState *cxl_dstate,
                                               uint16_t *len)
{
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)cmd->payload;
    memset(fw_info, 0, sizeof(*fw_info));

    fw_info->slots_supported = 2;
    fw_info->slot_info = BIT(0) | BIT(3);
    fw_info->caps = 0;
    pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");

    *len = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.3.1 */
static CXLRetCode cmd_timestamp_get(struct cxl_cmd *cmd,
                                    CXLDeviceState *cxl_dstate,
                                    uint16_t *len)
{
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(cmd->payload, final_time);
    *len = 8;

    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.3.2 */
static CXLRetCode cmd_timestamp_set(struct cxl_cmd *cmd,
                                    CXLDeviceState *cxl_dstate,
                                    uint16_t *len)
{
    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)cmd->payload);

    *len = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* 8.2.9.4.1 */
static CXLRetCode cmd_logs_get_supported(struct cxl_cmd *cmd,
                                         CXLDeviceState *cxl_dstate,
                                         uint16_t *len)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)cmd->payload;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cxl_dstate->cel_size;

    *len = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.4.2 */
static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log = (void *)cmd->payload;

    /*
     * 8.2.9.4.2
     *   The device shall return Invalid Parameter if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by Get
     *   Supported Logs.
     *
     * XXX: Spec is wrong, "Invalid Parameter" isn't a thing.
     * XXX: Spec doesn't address the case of an incorrect UUID.
     *
     * The CEL buffer is large enough to fit all commands in the emulation, so
     * the only possible failure would be if the mailbox itself isn't big
     * enough.
     */
    if (get_log->offset + get_log->length > cxl_dstate->payload_size) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len = get_log->length;

    memmove(cmd->payload, cxl_dstate->cel_log + get_log->offset,
            get_log->length);

    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.5.1.1 */
static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd,
                                             CXLDeviceState *cxl_dstate,
                                             uint16_t *len)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x43);

    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)cmd->payload;
    memset(id, 0, sizeof(*id));

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity, cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);

    *len = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}
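
/*
 * Get Partition Info: report active volatile and persistent capacities in
 * units of CXL_CAPACITY_MULTIPLIER; next_* fields of 0 mean no pending
 * partition change.
 */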
static CXLRetCode cmd_ccls_get_partition_info(struct cxl_cmd *cmd,
                                              CXLDeviceState *cxl_dstate,
                                              uint16_t *len)
{
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)cmd->payload;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}
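
/*
 * Get LSA: return 'length' bytes of the Label Storage Area starting at
 * 'offset', rejecting reads beyond the LSA size.
 */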
static CXLRetCode cmd_ccls_get_lsa(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint32_t offset, length;

    get_lsa = (void *)cmd->payload;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len = cvc->get_lsa(ct3d, get_lsa, length, offset);
    return CXL_MBOX_SUCCESS;
}
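
/*
 * Set LSA: write the supplied data into the Label Storage Area at the given
 * offset; an empty payload is treated as a successful no-op.
 */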
static CXLRetCode cmd_ccls_set_lsa(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)cmd->payload;
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);
    uint16_t plen = *len;

    *len = 0;
    if (!plen) {
        return CXL_MBOX_SUCCESS;
    }

    if (set_lsa_payload->offset + plen > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    plen -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, plen, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/*
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing of that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd,
                                            CXLDeviceState *cxl_dstate,
                                            uint16_t *len)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)cmd->payload;
    struct get_poison_list_out_pl *out = (void *)cmd->payload;
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    stw_le_p(&out->count, record_count);
    *len = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
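
/*
 * Inject Poison: add a single cacheline-sized poison entry at the given DPA,
 * unless that DPA is already covered or the poison list is full.
 */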
static CXLRetCode cmd_media_inject_poison(struct cxl_cmd *cmd,
                                          CXLDeviceState *cxl_dstate,
                                          uint16_t *len_unused)
{
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)cmd->payload;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;

    return CXL_MBOX_SUCCESS;
}
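
/*
 * Clear Poison: clear one cacheline, removing the covering list entry and
 * re-adding fragments for any poisoned range left on either side of it.
 */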
static CXLRetCode cmd_media_clear_poison(struct cxl_cmd *cmd,
                                         CXLDeviceState *cxl_dstate,
                                         uint16_t *len_unused)
{
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)cmd->payload;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for contained in entry. Simpler than general case
         * as clearing 64 bytes and entries 64 byte aligned
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        return CXL_MBOX_SUCCESS;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added, free original entry */
    g_free(ent);

    return CXL_MBOX_SUCCESS;
}

#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define IMMEDIATE_DATA_CHANGE (1 << 2)
#define IMMEDIATE_POLICY_CHANGE (1 << 3)
#define IMMEDIATE_LOG_CHANGE (1 << 4)
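
/*
 * Command dispatch table indexed by [command set][command]. 'in' is the
 * expected input payload length (~0 accepts a variable length) and the
 * effect flags are reported in the Command Effects Log.
 */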
static struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
};
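
/*
 * Decode the command register, check the input payload length, run the
 * handler in place on the payload area, then write back the status and
 * length and clear the doorbell.
 */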
void cxl_process_mailbox(CXLDeviceState *cxl_dstate)
{
    uint16_t ret = CXL_MBOX_SUCCESS;
    struct cxl_cmd *cxl_cmd;
    uint64_t status_reg;
    opcode_handler h;
    uint64_t command_reg = cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];

    uint8_t set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET);
    uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
    uint16_t len = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
    cxl_cmd = &cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (h) {
        if (len == cxl_cmd->in || cxl_cmd->in == ~0) {
            cxl_cmd->payload = cxl_dstate->mbox_reg_state +
                A_CXL_DEV_CMD_PAYLOAD;
            ret = (*h)(cxl_cmd, cxl_dstate, &len);
            assert(len <= cxl_dstate->payload_size);
        } else {
            ret = CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
    } else {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        ret = CXL_MBOX_UNSUPPORTED;
    }

    /* Set the return code */
    status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, ERRNO, ret);

    /* Set the return length */
    command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET, 0);
    command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND, 0);
    command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH, len);

    cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
    cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;

    /* Tell the host we're done */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                     DOORBELL, 0);
}
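
/* Build the Command Effects Log from every implemented command. */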
void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate)
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmd_set[set][cmd].handler) {
                struct cxl_cmd *c = &cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cxl_dstate->cel_log[cxl_dstate->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cxl_dstate->cel_size++;
            }
        }
    }
}