/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive_regs.h"
#include "trace.h"
/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification.
 */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}
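
/*
 * Example: the IPB keeps one bit per priority, bit 0x80 >> priority
 * (cf. xive_priority_to_ipb()). With IPB = 0x28, i.e. priorities 2
 * and 4 pending, clz32(0x28 << 24) yields 2: the PIPR reports the
 * most favored (lowest numbered) pending priority. An empty IPB maps
 * to 0xFF, meaning "nothing pending".
 */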

static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0; /* Not supported */
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        return 0;
    }
}

static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop Exception bit */
        regs[TM_NSR] &= ~mask;

        trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
                               regs[TM_IPB], regs[TM_PIPR],
                               regs[TM_CPPR], regs[TM_NSR]);
    }

    return (nsr << 8) | regs[TM_CPPR];
}
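
/*
 * Note: the value returned to the guest by an ACK load is the 2-byte
 * { NSR, CPPR } pair, which is why the TM_SPC_ACK_OS_REG and
 * TM_SPC_ACK_HV_REG operations are registered below with a size of 2.
 */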

static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
                               regs[TM_IPB], regs[TM_PIPR],
                               regs[TM_CPPR], regs[TM_NSR]);
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}

void xive_tctx_reset_os_signal(XiveTCTX *tctx)
{
    /*
     * Lower the External interrupt. Used when pulling an OS
     * context. It is necessary to avoid catching it in the hypervisor
     * context. It should be raised again when re-pushing the OS
     * context.
     */
    qemu_irq_lower(xive_tctx_output(tctx, TM_QW1_OS));
}

static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    uint8_t *regs = &tctx->regs[ring];

    trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
                             regs[TM_IPB], regs[TM_PIPR],
                             cppr, regs[TM_NSR]);

    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}

void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
{
    uint8_t *regs = &tctx->regs[ring];

    regs[TM_IPB] |= ipb;
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
    xive_tctx_notify(tctx, ring);
}
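
/*
 * In short, event delivery through the context registers works as
 * follows: a new event sets its priority bit in the IPB, the PIPR is
 * recomputed from the IPB, and xive_tctx_notify() compares the PIPR
 * with the CPPR. When more favored, the NSR exception bit is set and
 * the ring output line is raised.
 */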

/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */

static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                      hwaddr offset, unsigned size)
{
    uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw2w2;

    qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
    memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
    return qw2w2;
}

static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}

/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw register values
 *
 * Register accessibility bits:
 *
 *    0x0 - no access
 *    0x1 - write only
 *    0x2 - read only
 *    0x3 - read/write
 */

static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   3, 3, 3, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   0, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    2, 3, 2, 2,   2, 2, 0, 2,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-0 User */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

/*
 * Computes a register access mask for a given offset in the TIMA
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & TM_REG_OFFSET;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}
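
/*
 * Example: a 4-byte access at offset TM_QW1_OS (0x010) from the OS
 * page uses the xive_tm_os_view bytes { 2, 3, 2, 2 }: a load may read
 * all four bytes (mask 0xffffffff) while a store only reaches the
 * CPPR byte (mask 0x00ff0000).
 */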

static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & TM_RING_OFFSET;
    uint8_t reg_offset = offset & TM_REG_OFFSET;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    /*
     * Only 4- or 8-byte stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}

static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & TM_RING_OFFSET;
    uint8_t reg_offset = offset & TM_REG_OFFSET;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    /*
     * Only 4- or 8-byte loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    /* filter out reserved values */
    return ret & mask;
}

/*
 * The TM context is mapped twice within each page. Stores and loads
 * to the first mapping below 2K write and read the specified values
 * without modification. The second mapping above 2K performs specific
 * state changes (side effects) in addition to setting/returning the
 * interrupt management area context of the processor thread.
 */
static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */
static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff));
}

static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
                               uint32_t *nvt_idx, bool *vo)
{
    if (nvt_blk) {
        *nvt_blk = xive_nvt_blk(cam);
    }
    if (nvt_idx) {
        *nvt_idx = xive_nvt_idx(cam);
    }
    if (vo) {
        *vo = !!(cam & TM_QW1W2_VO);
    }
}

static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk,
                                     uint32_t *nvt_idx, bool *vo)
{
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t cam = be32_to_cpu(qw1w2);

    xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo);
    return qw1w2;
}

static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2)
{
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}

static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                    hwaddr offset, unsigned size)
{
    uint32_t qw1w2;
    uint32_t qw1w2_new;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    bool vo;

    qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo);

    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n",
                      nvt_blk, nvt_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
    xive_tctx_set_os_cam(tctx, qw1w2_new);

    xive_tctx_reset_os_signal(tctx);
    return qw1w2;
}

static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
                                  uint8_t nvt_blk, uint32_t nvt_idx)
{
    XiveNVT nvt;
    uint8_t ipb;

    /*
     * Grab the associated NVT to pull the pending bits, and merge
     * them with the IPB of the thread interrupt context registers
     */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    ipb = xive_get_field32(NVT_W4_IPB, nvt.w4);

    if (ipb) {
        /* Reset the NVT value */
        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
    }
    /*
     * Always call xive_tctx_ipb_update(). Even if there were no
     * escalation triggered, there could be a pending interrupt which
     * was saved when the context was pulled and that we need to take
     * into account by recalculating the PIPR (which is not
     * saved/restored).
     * It will also raise the External interrupt signal if needed.
     */
    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}

/*
 * Updating the OS CAM line can trigger a resend of interrupts
 */
static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam = value;
    uint32_t qw1w2 = cpu_to_be32(cam);
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    bool vo;

    xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo);

    /* First update the registers */
    xive_tctx_set_os_cam(tctx, qw1w2);

    /* Check the interrupt pending bits */
    if (vo) {
        xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx);
    }
}

static uint32_t xive_presenter_get_config(XivePresenter *xptr)
{
    XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);

    return xpc->get_config(xptr);
}

/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */
typedef struct XiveTmOp {
    uint8_t  page_offset;
    uint32_t op_offset;
    unsigned size;
    void     (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset,
                              uint64_t value, unsigned size);
    uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                             unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};

static const XiveTmOp xive2_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive2_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive2_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};

static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,
                                       unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & TM_ADDRESS_MASK;
    const XiveTmOp *tm_ops;
    int i, tm_ops_count;
    uint32_t cfg;

    cfg = xive_presenter_get_config(xptr);
    if (cfg & XIVE_PRESENTER_GEN1_TIMA_OS) {
        tm_ops = xive_tm_operations;
        tm_ops_count = ARRAY_SIZE(xive_tm_operations);
    } else {
        tm_ops = xive2_tm_operations;
        tm_ops_count = ARRAY_SIZE(xive2_tm_operations);
    }

    for (i = 0; i < tm_ops_count; i++) {
        const XiveTmOp *xto = &tm_ops[i];

        /* Accesses done from a more privileged TIMA page are allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }

    return NULL;
}

/*
 * TIMA MMIO handlers
 */
void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                        uint64_t value, unsigned size)
{
    const XiveTmOp *xto;

    trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value);

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & TM_SPECIAL_OP) {
        xto = xive_tm_find_op(tctx->xptr, offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(xptr, tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(tctx->xptr, offset, size, true);
    if (xto) {
        xto->write_handler(xptr, tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                           unsigned size)
{
    const XiveTmOp *xto;
    uint64_t ret;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & TM_SPECIAL_OP) {
        xto = xive_tm_find_op(tctx->xptr, offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
                          " @%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        ret = xto->read_handler(xptr, tctx, offset, size);
        goto out;
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(tctx->xptr, offset, size, false);
    if (xto) {
        ret = xto->read_handler(xptr, tctx, offset, size);
        goto out;
    }

    /*
     * Finish with raw access to the register values
     */
    ret = xive_tm_raw_read(tctx, offset, size);
out:
    trace_xive_tctx_tm_read(tctx->cs->cpu_index, offset, size, ret);
    return ret;
}

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x   %02x  %02x    %02x   %02x  "
                           "%02x  %02x  %02x   %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR],
                           be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

/*
 * kvm_irqchip_in_kernel() will cause the compiler to turn this
 * into a nop if CONFIG_KVM isn't defined.
 */
#define xive_in_kernel(xptr)                                            \
    (kvm_irqchip_in_kernel() &&                                         \
     ({                                                                 \
         XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);      \
         xpc->in_kernel ? xpc->in_kernel(xptr) : false;                 \
     }))

void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf)
{
    int cpu_index;
    int i;

    /*
     * Skip partially initialized vCPUs. This can happen on sPAPR
     * when vCPUs are hot plugged or unplugged.
     */
    if (!tctx) {
        return;
    }

    cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;

    if (xive_in_kernel(tctx->xptr)) {
        Error *local_err = NULL;

        kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    g_string_append_printf(buf, "CPU[%04x]:   "
                           "QW   NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                           "  W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
        g_string_append_printf(buf, "CPU[%04x]: %4s    %s\n",
                               cpu_index, xive_tctx_ring_names[i], s);
        g_free(s);
    }
}

void xive_tctx_reset(XiveTCTX *tctx)
{
    memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}

static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;

    assert(tctx->cs);
    assert(tctx->xptr);

    cpu = POWERPC_CPU(tctx->cs);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->hv_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_HINT);
        tctx->os_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT);
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (xive_in_kernel(tctx->xptr)) {
        if (kvmppc_xive_cpu_connect(tctx, errp) < 0) {
            return;
        }
    }
}

static int vmstate_xive_tctx_pre_save(void *opaque)
{
    XiveTCTX *tctx = XIVE_TCTX(opaque);
    Error *local_err = NULL;
    int ret;

    if (xive_in_kernel(tctx->xptr)) {
        ret = kvmppc_xive_cpu_get_state(tctx, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
    XiveTCTX *tctx = XIVE_TCTX(opaque);
    Error *local_err = NULL;
    int ret;

    if (xive_in_kernel(tctx->xptr)) {
        /*
         * Required for hotplugged CPU, for which the state comes
         * after all states of the machine.
         */
        ret = kvmppc_xive_cpu_set_state(tctx, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_xive_tctx_pre_save,
    .post_load = vmstate_xive_tctx_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static Property xive_tctx_properties[] = {
    DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
    DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER,
                     XivePresenter *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->vmsd = &vmstate_xive_tctx;
    device_class_set_props(dc, xive_tctx_properties);
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by xive_tctx_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_tctx_info = {
    .name          = TYPE_XIVE_TCTX,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init    = xive_tctx_class_init,
};

Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp)
{
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj);
    object_unref(obj);
    object_property_set_link(obj, "cpu", cpu, &error_abort);
    object_property_set_link(obj, "presenter", OBJECT(xptr), &error_abort);
    if (!qdev_realize(DEVICE(obj), NULL, errp)) {
        object_unparent(obj);
        return NULL;
    }
    return obj;
}

void xive_tctx_destroy(XiveTCTX *tctx)
{
    Object *obj = OBJECT(tctx);

    object_unparent(obj);
}

/*
 * XIVE ESB helpers
 */

uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}
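
/*
 * Summary of the ESB PQ state machine implemented by the two helpers
 * above (P = an event was presented, Q = an event was queued while
 * pending):
 *
 *   PQ state      trigger             EOI
 *   00 (RESET)    -> 10, notify       -> 00, no notify
 *   10 (PENDING)  -> 11, no notify    -> 00, no notify
 *   11 (QUEUED)   -> 11, no notify    -> 10, notify (replay)
 *   01 (OFF)      -> 01, no notify    -> 01, no notify
 */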

/*
 * XIVE Interrupt Source (or IVSE)
 */

uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xive_source_set_asserted(xsrc, srcno, true);

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

/*
 * Sources can be configured with PQ offloading in which case the check
 * on the PQ state bits of MSIs is disabled
 */
static bool xive_source_esb_disabled(XiveSource *xsrc, uint32_t srcno)
{
    return (xsrc->esb_flags & XIVE_SRC_PQ_DISABLE) &&
        !xive_source_irq_is_lsi(xsrc, srcno);
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    if (xive_source_esb_disabled(xsrc, srcno)) {
        return true;
    }

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    if (xive_source_esb_disabled(xsrc, srcno)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EOI for IRQ %d\n", srcno);
        return false;
    }

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_is_asserted(xsrc, srcno)) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}

/*
 * Forward the source event notification to the Router
 */
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
    bool pq_checked = !xive_source_esb_disabled(xsrc, srcno);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno, pq_checked);
    }
}

/*
 * In a two-page ESB MMIO setting, the even page is the trigger page,
 * the odd page is for management
 */
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}
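
/*
 * Example: with the default "shift" property of XIVE_ESB_64K_2PAGE,
 * each source owns a 128K ESB region split in two 64K pages: the even
 * page only accepts triggers while the odd page carries the
 * management (EOI/PQ) offsets decoded below.
 */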

/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       -1              EOI and return 0|1
 * 0x400 .. 0x7FF       -1              EOI and return 0|1
 * 0x800 .. 0xBFF       -1              return PQ
 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
 */
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    /* In a two-page ESB MMIO setting, the trigger page should not be read */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    trace_xive_source_esb_read(addr, srcno, ret);

    return ret;
}

/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    trace_xive_source_esb_write(addr, srcno, value);

    /* In a two-page ESB MMIO setting, the trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    /*
     * This is an internal offset used to inject triggers when the PQ
     * state bits are not controlled locally. Such as for LSIs when
     * under ABT mode.
     */
    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        notify = true;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xive_source_set_asserted(xsrc, srcno, false);
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, GString *buf)
{
    for (unsigned i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        g_string_append_printf(buf, "  %08x %s %c%c%c\n", i + offset,
                               xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                               pq & XIVE_ESB_VAL_P ? 'P' : '-',
                               pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                               xive_source_is_asserted(xsrc, i) ? 'A' : ' ');
    }
}

static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not clear the LSI bitmap */

    memset(xsrc->status, xsrc->reset_pq, xsrc->nr_irqs);
}

static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);
    size_t esb_len = xive_source_esb_len(xsrc);

    assert(xsrc->xive);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len);
    memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc),
                          &xive_source_esb_ops, xsrc, "xive.esb-emulated",
                          esb_len);
    memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated);

    qemu_register_reset(xive_source_reset, dev);
}

static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};

/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI, to be in sync with KVM.
 */
static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    /*
     * By default, PQs are initialized to 0b01 (Q=1) which corresponds
     * to "ints off"
     */
    DEFINE_PROP_UINT8("reset-pq", XiveSource, reset_pq, XIVE_ESB_OFF),
    DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
                     XiveNotifier *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE Interrupt Source";
    device_class_set_props(dc, xive_source_properties);
    dc->realize = xive_source_realize;
    dc->vmsd    = &vmstate_xive_source;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_source_info = {
    .name          = TYPE_XIVE_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init    = xive_source_class_init,
};

/*
 * XiveEND helpers
 */

void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, GString *buf)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    g_string_append_printf(buf, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr,
                            &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "",
                               be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    g_string_append_c(buf, ']');
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, GString *buf)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESn, end->w1);

    g_string_append_printf(buf,
                           "  %08x %c%c %c%c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive_end_is_valid(end)    ? 'v' : '-',
                           xive_end_is_enqueue(end)  ? 'q' : '-',
                           xive_end_is_notify(end)   ? 'n' : '-',
                           xive_end_is_backlog(end)  ? 'b' : '-',
                           xive_end_is_escalate(end) ? 'e' : '-',
                           xive_end_is_uncond_escalation(end) ? 'u' : '-',
                           xive_end_is_silent_escalation(end) ? 's' : '-',
                           xive_end_is_firmware(end) ? 'f' : '-',
                           priority, nvt_blk, nvt_idx);

    if (qaddr_base) {
        g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                               qaddr_base, qindex, qentries, qgen);
        xive_end_queue_pic_print_info(end, 6, buf);
    }
    g_string_append_c(buf, '\n');
}

static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr,
                         &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}
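
/*
 * Each EQ entry is a big-endian word with the generation bit in the
 * MSB and 31 bits of event data. The generation bit flips each time
 * the write index wraps, which is how the consumer distinguishes new
 * entries from stale ones.
 */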

void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx, GString *buf)
{
    XiveEAS *eas = (XiveEAS *) &end->w4;
    uint8_t pq;

    if (!xive_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESe, end->w1);

    g_string_append_printf(buf, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive_eas_is_valid(eas) ? 'V' : ' ',
                           xive_eas_is_masked(eas) ? 'M' : ' ',
                           (uint8_t)  xive_get_field64(EAS_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

static int xive_router_get_pq(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, uint8_t *pq)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}

static int xive_router_set_pq(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, uint8_t *pq)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

static int xive_router_get_block_id(XiveRouter *xrtr)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

static void xive_router_realize(DeviceState *dev, Error **errp)
{
    XiveRouter *xrtr = XIVE_ROUTER(dev);

    assert(xrtr->xfb);
}

static void xive_router_end_notify_handler(XiveRouter *xrtr, XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    xrc->end_notify(xrtr, eas);
}

/*
 * Encode the HW CAM line in the block group mode format :
 *
 *   chip << 19 | 0000000 0 0001 thread (7 bits)
 */
static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr));

    return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
}
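
/*
 * Example: assuming the "chip << 19 | ..." encoding above, a thread
 * with PIR 0x25 on block 0 presents the HW CAM line
 * xive_nvt_cam_line(0, 0x80 | 0x25) = 0xa5.
 */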

/*
 * The thread context register words are in big-endian format.
 */
int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                              uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }

    return -1;
}

/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. In case
 * of a single NVT notification, the process is abbreviated and the
 * thread is signaled if a match is found. In case of a logical server
 * notification (bits ignored at the end of the NVT identifier), the
 * IVPE and IVRE select a winning thread using different filters. This
 * involves 2 or 3 exchanges on the PowerBus that the model does not
 * support.
 *
 * The parameters represent what is sent on the PowerBus
 */
bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
                           uint8_t nvt_blk, uint32_t nvt_idx,
                           bool cam_ignore, uint8_t priority,
                           uint32_t logic_serv)
{
    XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    int count;

    /*
     * Ask the machine to scan the interrupt controllers for a match
     */
    count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
                           priority, logic_serv, &match);
    if (count < 0) {
        return false;
    }

    /* handle CPU exception delivery */
    if (count) {
        trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring);
        xive_tctx_ipb_update(match.tctx, match.ring,
                             xive_priority_to_ipb(priority));
    }

    return !!count;
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    XiveNVT nvt;
    bool found;

    uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w);

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        trace_xive_router_end_notify(end_blk, end_idx, end_data);
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread :
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /*
         * Record the IPB in the associated NVT structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) |
            xive_priority_to_ipb(priority);
        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    trace_xive_router_end_escalate(end_blk, end_idx,
           (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
           (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
           (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5));
    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify_handler(xrtr, (XiveEAS *) &end.w4);
}

void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify_handler(xrtr, &eas);
}
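
/*
 * End-to-end, a device interrupt thus flows: source ESB PQ trigger
 * -> EAS lookup in the router -> END (optional enqueue and ESn
 * coalescing) -> presenter CAM match -> IPB update in the matched
 * thread context, which raises the CPU input line.
 */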

static Property xive_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);

    dc->desc = "XIVE Router Engine";
    device_class_set_props(dc, xive_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive_router_realize;
    xnc->notify = xive_router_notify;

    /* By default, the router handles END triggers locally */
    xrc->end_notify = xive_router_end_notify;
}

static const TypeInfo xive_router_info = {
    .name          = TYPE_XIVE_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(XiveRouter),
    .class_size    = sizeof(XiveRouterClass),
    .class_init    = xive_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    },
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, GString *buf)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    g_string_append_printf(buf, "  %08x %s end:%02x/%04x data:%08x\n",
                           lisn, xive_eas_is_masked(eas) ? "M" : " ",
                           (uint8_t)  xive_get_field64(EAS_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    trace_xive_end_source_read(end_blk, end_idx, addr);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
                     XiveRouter *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    device_class_set_props(dc, xive_end_source_properties);
    dc->realize = xive_end_source_realize;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
    .name          = TYPE_XIVE_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init    = xive_end_source_class_init,
};

/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

/*
 * XIVE Presenter
 */
static const TypeInfo xive_presenter_info = {
    .name = TYPE_XIVE_PRESENTER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XivePresenterClass),
};

/*
 * XIVE Fabric
 */
static const TypeInfo xive_fabric_info = {
    .name = TYPE_XIVE_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveFabricClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_fabric_info);
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_presenter_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)