/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/irq.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "trace.h"

/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification.
 */
static uint8_t ipb_to_pipr(uint8_t ibp)
{
    return ibp ? clz32((uint32_t)ibp << 24) : 0xff;
}

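/*
 * For example, xive_priority_to_ipb() (defined in the XIVE headers)
 * sets bit (7 - priority) in the IPB. With IPB = 0x81 (priorities 0
 * and 7 pending), clz32(0x81000000) = 0, so PIPR = 0: the most
 * favored pending priority wins.
 */
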
static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0; /* Not supported */
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        return 0;
    }
}

static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop Exception bit */
        regs[TM_NSR] &= ~mask;

        trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
                               regs[TM_IPB], regs[TM_PIPR],
                               regs[TM_CPPR], regs[TM_NSR]);
    }

    return (nsr << 8) | regs[TM_CPPR];
}

static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
                               regs[TM_IPB], regs[TM_PIPR],
                               regs[TM_CPPR], regs[TM_NSR]);
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}

void xive_tctx_reset_os_signal(XiveTCTX *tctx)
{
    /*
     * Lower the External interrupt. Used when pulling an OS
     * context. It is necessary to avoid catching it in the hypervisor
     * context. It should be raised again when re-pushing the OS
     * context.
     */
    qemu_irq_lower(xive_tctx_output(tctx, TM_QW1_OS));
}

static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    uint8_t *regs = &tctx->regs[ring];

    trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
                             regs[TM_IPB], regs[TM_PIPR],
                             cppr, regs[TM_NSR]);

    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}

void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
{
    uint8_t *regs = &tctx->regs[ring];

    regs[TM_IPB] |= ipb;
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
    xive_tctx_notify(tctx, ring);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */

static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                      hwaddr offset, unsigned size)
{
    uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw2w2;

    qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
    memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
    return qw2w2;
}

static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}

/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw registers values
 *
 * Registers accessibility bits :
 *
 *    0x0 - no access
 *    0x1 - write only
 *    0x2 - read only
 *    0x3 - read/write
 */

static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

/*
 * Computes a register access mask for a given offset in the TIMA
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & 0x3F;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}

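/*
 * For example, an 8-byte load at the start of the OS ring (reg_offset
 * TM_QW1_OS = 0x10) through the OS page uses the xive_tm_os_view row
 * "2, 3, 2, 2, 2, 2, 0, 2": every byte but TM_AGE (offset 6) is
 * readable, giving a mask of 0xffffffffffff00ff.
 */
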
static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    /*
     * Only 4 or 8 bytes stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}

static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    /*
     * Only 4 or 8 bytes loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    /* filter out reserved values */
    return ret & mask;
}

/*
 * The TM context is mapped twice within each page. Stores and loads
 * to the first mapping below 2K write and read the specified values
 * without modification. The second mapping above 2K performs specific
 * state changes (side effects) in addition to setting/returning the
 * interrupt management area context of the processor thread.
 */
static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */
static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff));
}

static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
                               uint32_t *nvt_idx, bool *vo)
{
    if (nvt_blk) {
        *nvt_blk = xive_nvt_blk(cam);
    }
    if (nvt_idx) {
        *nvt_idx = xive_nvt_idx(cam);
    }
    if (vo) {
        *vo = !!(cam & TM_QW1W2_VO);
    }
}

static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk,
                                     uint32_t *nvt_idx, bool *vo)
{
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t cam = be32_to_cpu(qw1w2);

    xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo);
    return qw1w2;
}

static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2)
{
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}

static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                    hwaddr offset, unsigned size)
{
    uint32_t qw1w2;
    uint32_t qw1w2_new;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    bool vo;

    qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo);

    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n",
                      nvt_blk, nvt_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
    xive_tctx_set_os_cam(tctx, qw1w2_new);

    xive_tctx_reset_os_signal(tctx);
    return qw1w2;
}

static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
                                  uint8_t nvt_blk, uint32_t nvt_idx)
{
    XiveNVT nvt;
    uint8_t ipb;

    /*
     * Grab the associated NVT to pull the pending bits, and merge
     * them with the IPB of the thread interrupt context registers
     */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    ipb = xive_get_field32(NVT_W4_IPB, nvt.w4);

    if (ipb) {
        /* Reset the NVT value */
        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
    }
    /*
     * Always call xive_tctx_ipb_update(). Even if no escalation was
     * triggered, there could be a pending interrupt which was saved
     * when the context was pulled and that we need to take into
     * account by recalculating the PIPR (which is not saved/restored).
     * It will also raise the External interrupt signal if needed.
     */
    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}

/*
 * Updating the OS CAM line can trigger a resend of interrupts
 */
static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam = value;
    uint32_t qw1w2 = cpu_to_be32(cam);
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    bool vo;

    xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo);

    /* First update the registers */
    xive_tctx_set_os_cam(tctx, qw1w2);

    /* Check the interrupt pending bits */
    if (vo) {
        xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx);
    }
}

/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */
typedef struct XiveTmOp {
    uint8_t  page_offset;
    uint32_t op_offset;
    unsigned size;
    void     (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset,
                              uint64_t value, unsigned size);
    uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                             unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};

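/*
 * For example, a 1-byte store at offset TM_QW1_OS + TM_CPPR (0x11) of
 * the OS page matches the first entry above and is routed to
 * xive_tm_set_os_cppr() instead of being written raw.
 */
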
static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    int i;

    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];

        /* Accesses done from a more privileged TIMA page are allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }
    return NULL;
}

/*
 * TIMA MMIO handlers
 */
void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                        uint64_t value, unsigned size)
{
    const XiveTmOp *xto;

    trace_xive_tctx_tm_write(offset, size, value);

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(xptr, tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(xptr, tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                           unsigned size)
{
    const XiveTmOp *xto;
    uint64_t ret;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
                          " @%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        ret = xto->read_handler(xptr, tctx, offset, size);
        goto out;
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        ret = xto->read_handler(xptr, tctx, offset, size);
        goto out;
    }

    /*
     * Finish with raw access to the register values
     */
    ret = xive_tm_raw_read(tctx, offset, size);
out:
    trace_xive_tctx_tm_read(offset, size, ret);
    return ret;
}

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR],
                           be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

/*
 * kvm_irqchip_in_kernel() will cause the compiler to turn this
 * into a nop if CONFIG_KVM isn't defined.
 */
#define xive_in_kernel(xptr)                                            \
    (kvm_irqchip_in_kernel() &&                                         \
     ({                                                                 \
         XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);      \
         xpc->in_kernel ? xpc->in_kernel(xptr) : false;                 \
     }))

void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
    int cpu_index;
    int i;

    /*
     * Skip partially initialized vCPUs. This can happen on sPAPR
     * when vCPUs are hot plugged or unplugged.
     */
    if (!tctx) {
        return;
    }

    cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;

    if (xive_in_kernel(tctx->xptr)) {
        Error *local_err = NULL;

        kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                   " W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
                       xive_tctx_ring_names[i], s);
        g_free(s);
    }
}

void xive_tctx_reset(XiveTCTX *tctx)
{
    memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}

static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;

    assert(tctx->cs);
    assert(tctx->xptr);

    cpu = POWERPC_CPU(tctx->cs);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->hv_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_HINT);
        tctx->os_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT);
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (xive_in_kernel(tctx->xptr)) {
        if (kvmppc_xive_cpu_connect(tctx, errp) < 0) {
            return;
        }
    }
}

static int vmstate_xive_tctx_pre_save(void *opaque)
{
    XiveTCTX *tctx = XIVE_TCTX(opaque);
    Error *local_err = NULL;
    int ret;

    if (xive_in_kernel(tctx->xptr)) {
        ret = kvmppc_xive_cpu_get_state(tctx, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
    XiveTCTX *tctx = XIVE_TCTX(opaque);
    Error *local_err = NULL;
    int ret;

    if (xive_in_kernel(tctx->xptr)) {
        /*
         * Required for hotplugged CPU, for which the state comes
         * after all states of the machine.
         */
        ret = kvmppc_xive_cpu_set_state(tctx, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_xive_tctx_pre_save,
    .post_load = vmstate_xive_tctx_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static Property xive_tctx_properties[] = {
    DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
    DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER,
                     XivePresenter *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->vmsd = &vmstate_xive_tctx;
    device_class_set_props(dc, xive_tctx_properties);
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by xive_tctx_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_tctx_info = {
    .name          = TYPE_XIVE_TCTX,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init    = xive_tctx_class_init,
};

Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp)
{
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj);
    object_unref(obj);
    object_property_set_link(obj, "cpu", cpu, &error_abort);
    object_property_set_link(obj, "presenter", OBJECT(xptr), &error_abort);
    if (!qdev_realize(DEVICE(obj), NULL, errp)) {
        object_unparent(obj);
        return NULL;
    }
    return obj;
}

void xive_tctx_destroy(XiveTCTX *tctx)
{
    Object *obj = OBJECT(tctx);

    object_unparent(obj);
}

/*
 * XIVE ESB helpers
 */

uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

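/*
 * PQ transition summary (P is bit 1, Q is bit 0 of the state):
 *
 *   trigger: 00 -> 10, forward     EOI: 00 -> 00, no resend
 *            10 -> 11, coalesce         10 -> 00, no resend
 *            11 -> 11, coalesce         11 -> 10, forward
 *            01 -> 01, discard          01 -> 01, no resend
 */
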
/*
 * XIVE Interrupt Source (or IVSE)
 */

uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xive_source_set_asserted(xsrc, srcno, true);

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

/*
 * Sources can be configured with PQ offloading in which case the check
 * on the PQ state bits of MSIs is disabled
 */
static bool xive_source_esb_disabled(XiveSource *xsrc, uint32_t srcno)
{
    return (xsrc->esb_flags & XIVE_SRC_PQ_DISABLE) &&
        !xive_source_irq_is_lsi(xsrc, srcno);
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    if (xive_source_esb_disabled(xsrc, srcno)) {
        return true;
    }

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    if (xive_source_esb_disabled(xsrc, srcno)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EOI for IRQ %d\n", srcno);
        return false;
    }

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_is_asserted(xsrc, srcno)) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}

/*
 * Forward the source event notification to the Router
 */
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
    bool pq_checked = !xive_source_esb_disabled(xsrc, srcno);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno, pq_checked);
    }
}

/*
 * In a two pages ESB MMIO setting, even page is the trigger page, odd
 * page is for management
 */
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}

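/*
 * For example, assuming the 64K two-page setting (esb_shift = 17, as
 * defined in the XIVE headers), each source owns a 128KB region: bit
 * 16 of the address selects the page, so the lower (even) 64KB page
 * triggers and the upper (odd) 64KB page is used for management.
 */
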
/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       -1              EOI and return 0|1
 * 0x400 .. 0x7FF       -1              EOI and return 0|1
 * 0x800 .. 0xBFF       -1              return PQ
 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
 */
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    /* In a two pages ESB MMIO setting, trigger page should not be read */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    trace_xive_source_esb_read(addr, srcno, ret);

    return ret;
}

/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    trace_xive_source_esb_write(addr, srcno, value);

    /* In a two pages ESB MMIO setting, trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    /*
     * This is an internal offset used to inject triggers when the PQ
     * state bits are not controlled locally. Such as for LSIs when
     * under ABT mode.
     */
    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        notify = true;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xive_source_set_asserted(xsrc, srcno, false);
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xive_source_is_asserted(xsrc, i) ? 'A' : ' ');
    }
}

static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not clear the LSI bitmap */

    /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
    memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}

static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);
    size_t esb_len = xive_source_esb_len(xsrc);

    assert(xsrc->xive);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len);
    memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc),
                          &xive_source_esb_ops, xsrc, "xive.esb-emulated",
                          esb_len);
    memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated);

    qemu_register_reset(xive_source_reset, dev);
}

static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};

/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI, to be in sync with KVM.
 */
static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
                     XiveNotifier *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Source";
    device_class_set_props(dc, xive_source_properties);
    dc->realize = xive_source_realize;
    dc->vmsd = &vmstate_xive_source;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_source_info = {
    .name          = TYPE_XIVE_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init    = xive_source_class_init,
};

/*
 * XiveEND helpers
 */

void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr,
                            &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESn, end->w1);

    monitor_printf(mon, "  %08x %c%c %c%c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_end_is_valid(end)    ? 'v' : '-',
                   xive_end_is_enqueue(end)  ? 'q' : '-',
                   xive_end_is_notify(end)   ? 'n' : '-',
                   xive_end_is_backlog(end)  ? 'b' : '-',
                   xive_end_is_escalate(end) ? 'e' : '-',
                   xive_end_is_uncond_escalation(end) ? 'u' : '-',
                   xive_end_is_silent_escalation(end) ? 's' : '-',
                   xive_end_is_firmware(end) ? 'f' : '-',
                   priority, nvt_blk, nvt_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}

static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr,
                         &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}

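/*
 * Queue geometry: entries are 4-byte words and the number of entries
 * is 1 << (qsize + 10), so e.g. qsize = 0 maps to a 1024-entry (4KB)
 * queue. The top bit of each word carries the generation, which flips
 * every time qindex wraps, letting the consumer detect valid entries.
 */
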
void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx,
                                 Monitor *mon)
{
    XiveEAS *eas = (XiveEAS *) &end->w4;
    uint8_t pq;

    if (!xive_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESe, end->w1);

    monitor_printf(mon, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_eas_is_valid(eas) ? 'V' : ' ',
                   xive_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

static
int xive_router_get_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                       uint8_t *pq)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}

static
int xive_router_set_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                       uint8_t *pq)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

static int xive_router_get_block_id(XiveRouter *xrtr)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

static void xive_router_realize(DeviceState *dev, Error **errp)
{
    XiveRouter *xrtr = XIVE_ROUTER(dev);

    assert(xrtr->xfb);
}

/*
 * Encode the HW CAM line in the block group mode format :
 *
 *   chip << 19 | 0000000 0 0001 thread (7Bit)
 */
static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr));

    return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
}

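/*
 * For example, on chip (block) 1 with PIR 0x25, the index is
 * 1 << 7 | 0x25 = 0xa5 and the CAM line is 1 << 19 | 0xa5 = 0x800a5.
 */
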
/*
 * The thread context register words are in big-endian format.
 */
int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                              uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. In case
 * of a single NVT notification, the process is abbreviated and the
 * thread is signaled if a match is found. In case of a logical server
 * notification (bits ignored at the end of the NVT identifier), the
 * IVPE and IVRE select a winning thread using different filters. This
 * involves 2 or 3 exchanges on the PowerBus that the model does not
 * support.
 *
 * The parameters represent what is sent on the PowerBus
 */
bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
                           uint8_t nvt_blk, uint32_t nvt_idx,
                           bool cam_ignore, uint8_t priority,
                           uint32_t logic_serv)
{
    XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    int count;

    /*
     * Ask the machine to scan the interrupt controllers for a match
     */
    count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
                           priority, logic_serv, &match);
    if (count < 0) {
        return false;
    }

    /* handle CPU exception delivery */
    if (count) {
        trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring);
        xive_tctx_ipb_update(match.tctx, match.ring,
                             xive_priority_to_ipb(priority));
    }

    return !!count;
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

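/*
 * The ESn/ESe bits follow the trigger state machine above: the first
 * event moves PQ from 00 to 10 and is forwarded; further events move
 * PQ to 11 and are coalesced until an EOI load resets the bits.
 */
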
/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    XiveNVT nvt;
    bool found;

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        trace_xive_router_end_notify(end_blk, end_idx, end_data);
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread :
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /*
         * Record the IPB in the associated NVT structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) |
            xive_priority_to_ipb(priority);
        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    trace_xive_router_end_escalate(end_blk, end_idx,
           (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
           (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
           (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5));
    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
                           xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
                           xive_get_field32(END_W5_ESC_END_DATA, end.w5));
}

void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA, eas.w));
}

static Property xive_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc = "XIVE Router Engine";
    device_class_set_props(dc, xive_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive_router_realize;
    xnc->notify = xive_router_notify;
}

static const TypeInfo xive_router_info = {
    .name          = TYPE_XIVE_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(XiveRouter),
    .class_size    = sizeof(XiveRouterClass),
    .class_init    = xive_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    },
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, "  %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    trace_xive_end_source_read(end_blk, end_idx, addr);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
                     XiveRouter *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    device_class_set_props(dc, xive_end_source_properties);
    dc->realize = xive_end_source_realize;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
    .name          = TYPE_XIVE_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init    = xive_end_source_class_init,
};

/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

/*
 * XIVE Presenter
 */
static const TypeInfo xive_presenter_info = {
    .name = TYPE_XIVE_PRESENTER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XivePresenterClass),
};

/*
 * XIVE Fabric
 */
static const TypeInfo xive_fabric_info = {
    .name = TYPE_XIVE_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveFabricClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_fabric_info);
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_presenter_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)