xive2: Add a get_config() handler for the router configuration
[qemu.git] / hw / intc / xive2.c
blobc45422542459e2837b02131577a0ee90842dd26b
1 /*
2 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
6  * Copyright (c) 2019-2022, IBM Corporation.
6 * This code is licensed under the GPL version 2 or later. See the
7 * COPYING file in the top-level directory.
8 */
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "target/ppc/cpu.h"
15 #include "sysemu/cpus.h"
16 #include "sysemu/dma.h"
17 #include "hw/qdev-properties.h"
18 #include "monitor/monitor.h"
19 #include "hw/ppc/xive.h"
20 #include "hw/ppc/xive2.h"
21 #include "hw/ppc/xive2_regs.h"
23 uint32_t xive2_router_get_config(Xive2Router *xrtr)
25 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
27 return xrc->get_config(xrtr);
30 void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon)
32 if (!xive2_eas_is_valid(eas)) {
33 return;
36 monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
37 lisn, xive2_eas_is_masked(eas) ? "M" : " ",
38 (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w),
39 (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
40 (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
/*
 * Dump the tail of an END event queue on the monitor: the last
 * 'width' entries up to (and including) the current enqueue index.
 * Queue entries are read from guest memory through the DMA API.
 */
void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
                                    Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    /* Queue size is encoded as a power of two of 4-byte entries: 2^(qsize+10) */
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    /* Step back (width - 1) entries, wrapping around the circular queue */
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2); /* 4 bytes per entry */
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        /* The '^' marker flags the current enqueue position */
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}
/*
 * Dump the state of one END on the monitor: ESn PQ bits, the END
 * control flags, priority, target NVP, and (when a queue is backed by
 * guest memory) a window of the event queue contents.
 */
void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    /* Nothing to report for an invalid END */
    if (!xive2_end_is_valid(end)) {
        return;
    }

    /* ESn: Event State Buffer for notification */
    pq = xive_get_field32(END2_W1_ESn, end->w1);

    monitor_printf(mon,
                   "  %08x %c%c %c%c%c%c%c%c%c%c%c%c prio:%d nvp:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_end_is_valid(end)    ? 'v' : '-',
                   xive2_end_is_enqueue(end)  ? 'q' : '-',
                   xive2_end_is_notify(end)   ? 'n' : '-',
                   xive2_end_is_backlog(end)  ? 'b' : '-',
                   xive2_end_is_escalate(end) ? 'e' : '-',
                   xive2_end_is_escalate_end(end) ? 'N' : '-',
                   xive2_end_is_uncond_escalation(end)   ? 'u' : '-',
                   xive2_end_is_silent_escalation(end)   ? 's' : '-',
                   xive2_end_is_firmware1(end)   ? 'f' : '-',
                   xive2_end_is_firmware2(end)   ? 'F' : '-',
                   priority, nvp_blk, nvp_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        /* Show the last 6 queue entries around the enqueue index */
        xive2_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}
/*
 * Dump the escalation EAS embedded in an END (words 4-5) together
 * with its ESe PQ bits. Only meaningful when escalation is enabled
 * on the END.
 */
void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  Monitor *mon)
{
    /* The escalation EAS is overlaid on END words 4-5 */
    Xive2Eas *eas = (Xive2Eas *) &end->w4;
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    /* ESe: Event State Buffer for escalation */
    pq = xive_get_field32(END2_W1_ESe, end->w1);

    monitor_printf(mon, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_eas_is_valid(eas) ? 'v' : ' ',
                   xive2_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}
/*
 * Append one 32-bit event datum to the END event queue in guest
 * memory. The top bit of each entry carries the queue generation bit
 * so the consumer can detect wrap-around; the generation toggles each
 * time the enqueue index wraps back to 0.
 */
static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    /* Entry layout: generation bit in bit 31, event data in bits 0-30 */
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        /* The queue wrapped: flip the generation bit */
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
171 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
174 static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
175 uint32_t *nvp_idx, bool *vo)
177 *nvp_blk = xive2_nvp_blk(cam);
178 *nvp_idx = xive2_nvp_idx(cam);
179 *vo = !!(cam & TM2_QW1W2_VO);
/*
 * TIMA "pull OS context" operation: return the current OS CAM line
 * (big-endian, as stored in the thread context registers) and
 * invalidate it by clearing the VO bit. Pulling an already-invalid
 * context is logged as a guest error but still completes.
 */
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2_new;
    uint32_t cam = be32_to_cpu(qw1w2);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo);

    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);

    /* Return the CAM line as it was before invalidation */
    return qw1w2;
}
/*
 * After an NVP is pushed on a HW thread, check whether interrupts
 * were left pending in the NVP while the vCPU was not dispatched.
 * Any recorded IPB bits are transferred from the NVP to the thread
 * context, which can trigger a resend of the interrupt.
 */
static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx)
{
    Xive2Nvp nvp;
    uint8_t ipb;
    uint8_t cppr = 0; /* CPPR restore is not modeled here; kept at 0 */

    /*
     * Grab the associated thread interrupt context registers in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        /* Consume the backlog: clear the IPB in the NVP structure */
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }

    /* An IPB or CPPR change can trigger a resend */
    if (ipb || cppr) {
        xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
    }
}
/*
 * Updating the OS CAM line can trigger a resend of interrupt
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam = value;
    uint32_t qw1w2 = cpu_to_be32(cam);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo);

    /* First update the thread context */
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx);
    }
}
265 * XIVE Router (aka. Virtualization Controller or IVRE)
268 int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
269 Xive2Eas *eas)
271 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
273 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
276 static
277 int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
278 uint8_t *pq)
280 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
282 return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
285 static
286 int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
287 uint8_t *pq)
289 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
291 return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
294 int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
295 Xive2End *end)
297 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
299 return xrc->get_end(xrtr, end_blk, end_idx, end);
302 int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
303 Xive2End *end, uint8_t word_number)
305 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
307 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
310 int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
311 Xive2Nvp *nvp)
313 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
315 return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
318 int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
319 Xive2Nvp *nvp, uint8_t word_number)
321 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
323 return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
326 static int xive2_router_get_block_id(Xive2Router *xrtr)
328 Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
330 return xrc->get_block_id(xrtr);
/*
 * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
 * width and block id width is configurable at the IC level.
 *
 *    chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
 *    chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
 *
 * NOTE(review): this model hard-codes the 7-bit thread id layout.
 */
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    /* The PIR identifies the HW thread */
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive2_router_get_block_id(xrtr);
    uint8_t tid_shift = 7;
    uint8_t tid_mask = (1 << tid_shift) - 1;

    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}
/*
 * The thread context register words are in big-endian format.
 *
 * Match a notification (format, NVT identifier, optional logical
 * server) against the CAM lines of a thread context. Returns the TIMA
 * ring offset that matched (PHYS, POOL, OS or USER) or -1 when there
 * is no match. Rings are checked from most to least privileged.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring: requires a valid OS CAM match plus a valid user
         * context with a matching logical server id */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}
415 static void xive2_router_realize(DeviceState *dev, Error **errp)
417 Xive2Router *xrtr = XIVE2_ROUTER(dev);
419 assert(xrtr->xfb);
/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 *
 * Returns true when the notification should be forwarded; false when
 * the ESB has coalesced it (Q bit already set).
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    /* Only write the END back when the PQ bits actually changed */
    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as in the function below.
 *
 * Processing: enqueue the event data (if enabled), coalesce through
 * the ESn bits, try to present the interrupt to a matching NVP, fall
 * back to the NVP backlog (IPB), and finally follow the escalation
 * path, which recursively re-enters this function as an END trigger.
 */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found;
    Xive2Nvp nvp;
    uint8_t nvp_blk;
    uint32_t nvp_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive2_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END2_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
    nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);

    /* NVP cache lookup */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /*
     * NOTE(review): the END2_W6_IGNORE mask is applied to end.w7 here
     * — verify against the P10 register layout that the ignore bit
     * really lives in W7.
     */
    found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                          xive_get_field32(END2_W6_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVP is dispatched on a HW thread :
     * - specific VP: update the NVP structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive2_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }

        /*
         * Record the IPB in the associated NVP structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
            xive_priority_to_ipb(priority);
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive2_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive2_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END2_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive2_router_end_notify(xrtr,
                           xive_get_field32(END2_W4_END_BLOCK, end.w4),
                           xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
                           xive_get_field32(END2_W5_ESC_END_DATA, end.w5));
}
/*
 * XiveNotifier handler: route a source event (LISN) through the EAS
 * table to its END. When the source PQ bits have not already been
 * checked by the caller (pq_checked == false), the router performs
 * the ESB trigger sequence itself using the get_pq/set_pq hooks.
 */
void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        /* ESB coalesced the event: nothing to forward */
        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                           xive_get_field64(EAS2_END_BLOCK, eas.w),
                           xive_get_field64(EAS2_END_INDEX, eas.w),
                           xive_get_field64(EAS2_END_DATA, eas.w));
}
/* QOM properties: the router requires a link to the XIVE fabric */
static Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
    DEFINE_PROP_END_OF_LIST(),
};
663 static void xive2_router_class_init(ObjectClass *klass, void *data)
665 DeviceClass *dc = DEVICE_CLASS(klass);
666 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
668 dc->desc = "XIVE2 Router Engine";
669 device_class_set_props(dc, xive2_router_properties);
670 /* Parent is SysBusDeviceClass. No need to call its realize hook */
671 dc->realize = xive2_router_realize;
672 xnc->notify = xive2_router_notify;
/*
 * Abstract base type for XIVE2 routers. Concrete machines subclass it
 * and fill in the Xive2RouterClass hooks.
 */
static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};
689 static inline bool addr_is_even(hwaddr addr, uint32_t shift)
691 return !((addr >> shift) & 1);
/*
 * MMIO load handler for the END ESB pages. Decodes the END index and
 * even/odd page from the address, applies the ESB load command (EOI,
 * GET, SET_PQ_xx) to the ESn or ESe PQ bits, and writes the END back
 * when the bits changed. Returns -1 on any lookup or decode error.
 */
static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    /* Low 12 bits select the ESB command within the page */
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    /* Each END has a pair of pages, hence the +1 in the shift */
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    /* Even page -> ESn (notification), odd page -> ESe (escalation) */
    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        /* Bits 9:8 of the offset encode the new PQ value */
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    /* Persist the PQ bits only when they changed */
    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}
/*
 * MMIO store handler for the END ESB pages. Stores implement trigger,
 * store-EOI and EQ-inject commands on the ESn/ESe PQ bits. The actual
 * forwarding of the resulting notification is not modeled yet (see
 * TODO at the bottom).
 */
static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    /* Even page -> ESn (notification), odd page -> ESe (escalation) */
    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router ? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        /* EQ injection is only valid on the notification (ESn) page */
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x can not EQ inject on ESe\n",
                           end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    /* Persist the PQ bits only when they changed */
    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}
/*
 * END ESB MMIO region: accesses are strictly 8 bytes wide, matching
 * the load/store size used by the XIVE firmware interface.
 */
static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
845 static void xive2_end_source_realize(DeviceState *dev, Error **errp)
847 Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);
849 assert(xsrc->xrtr);
851 if (!xsrc->nr_ends) {
852 error_setg(errp, "Number of interrupt needs to be greater than 0");
853 return;
856 if (xsrc->esb_shift != XIVE_ESB_4K &&
857 xsrc->esb_shift != XIVE_ESB_64K) {
858 error_setg(errp, "Invalid ESB shift setting");
859 return;
863 * Each END is assigned an even/odd pair of MMIO pages, the even page
864 * manages the ESn field while the odd page manages the ESe field.
866 memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
867 &xive2_end_source_ops, xsrc, "xive.end",
868 (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
/* QOM properties: END count, ESB page shift, and router back-link */
static Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
    DEFINE_PROP_END_OF_LIST(),
};
879 static void xive2_end_source_class_init(ObjectClass *klass, void *data)
881 DeviceClass *dc = DEVICE_CLASS(klass);
883 dc->desc = "XIVE END Source";
884 device_class_set_props(dc, xive2_end_source_properties);
885 dc->realize = xive2_end_source_realize;
/* Concrete device type exposing the END ESB MMIO pages */
static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};
895 static void xive2_register_types(void)
897 type_register_static(&xive2_router_info);
898 type_register_static(&xive2_end_source_info);
901 type_init(xive2_register_types)