/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "monitor/monitor.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"
uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_config(xrtr);
}
void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive2_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive2_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}
void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
                                    Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}
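/*
 * Note on the dump above: the '^' printed with the last entry marks
 * the current producer index (END2_W1_PAGE_OFF); the window wraps
 * around the queue modulo 'qentries'.
 */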
void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive2_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESn, end->w1);

    monitor_printf(mon,
                   " %08x %c%c %c%c%c%c%c%c%c%c%c%c prio:%d nvp:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_end_is_valid(end)             ? 'v' : '-',
                   xive2_end_is_enqueue(end)           ? 'q' : '-',
                   xive2_end_is_notify(end)            ? 'n' : '-',
                   xive2_end_is_backlog(end)           ? 'b' : '-',
                   xive2_end_is_escalate(end)          ? 'e' : '-',
                   xive2_end_is_escalate_end(end)      ? 'N' : '-',
                   xive2_end_is_uncond_escalation(end) ? 'u' : '-',
                   xive2_end_is_silent_escalation(end) ? 's' : '-',
                   xive2_end_is_firmware1(end)         ? 'f' : '-',
                   xive2_end_is_firmware2(end)         ? 'F' : '-',
                   priority, nvp_blk, nvp_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive2_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}
void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  Monitor *mon)
{
    Xive2Eas *eas = (Xive2Eas *) &end->w4;
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESe, end->w1);

    monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_eas_is_valid(eas) ? 'v' : ' ',
                   xive2_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}
static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
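/*
 * Note on the EQ entry written above: each entry is a big-endian
 * 32-bit word carrying the current generation bit in its MSB and the
 * 31-bit END data in the remaining bits. The generation bit flips on
 * every wrap of the queue index, which is how consumers distinguish
 * fresh entries from stale ones.
 */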
/*
 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
 *
 * TIMA Gen2 VP "save & restore" (S&R) indicated by H bit next to V bit
 *
 *   - if a context is enabled with the H bit set, the VP context
 *     information is retrieved from the NVP structure ("check out")
 *     and stored back on a context pull ("check in"), the SW receives
 *     the same context pull information as on P9
 *
 *   - the H bit cannot be changed while the V bit is set, i.e. a
 *     context cannot be set up in the TIMA and then be "pushed" into
 *     the NVP by changing the H bit while the context is enabled
 */
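
/*
 * The "check in" path below copies the IPB, CPPR and LSMFB thread
 * context registers back into NVP word 2 and then clears the checkout
 * state in word 1. Its "check out" counterpart is
 * xive2_tctx_restore_os_ctx() further down.
 */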
static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    Xive2Nvp nvp;
    uint8_t *regs = &tctx->regs[TM_QW1_OS];

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_hw(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_co(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checked out\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
        xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: NVP %x/%x invalid checkout Thread %x\n",
                      nvp_blk, nvp_idx, pir);
        return;
    }

    nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
    nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
    nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

    nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
    /* NVP2_W1_CO_THRID_VALID only set once */
    nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}
static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
                                uint32_t *nvp_idx, bool *vo, bool *ho)
{
    *nvp_blk = xive2_nvp_blk(cam);
    *nvp_idx = xive2_nvp_idx(cam);
    *vo = !!(cam & TM2_QW1W2_VO);
    *ho = !!(cam & TM2_QW1W2_HO);
}
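
/*
 * Besides the NVP block/index, the OS CAM word decoded above carries
 * two control bits: TM2_QW1W2_VO marks the context valid and
 * TM2_QW1W2_HO requests the automatic save & restore handled by
 * xive2_tctx_save_os_ctx() and xive2_tctx_restore_os_ctx().
 */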
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2_new;
    uint32_t cam = be32_to_cpu(qw1w2);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_save;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save);

    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);

    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
        xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
    }

    xive_tctx_reset_os_signal(tctx);
    return qw1w2;
}
static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                         uint8_t nvp_blk, uint32_t nvp_idx,
                                         Xive2Nvp *nvp)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t cppr;

    if (!xive2_nvp_is_hw(nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return 0;
    }

    cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
    nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);

    tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
    /* we don't model LSMFB */

    nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);

    /*
     * Checkout privilege: 0:OS, 1:Pool, 2:Hard
     *
     * TODO: we only support OS push/pull
     */
    nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);

    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);

    /* return restored CPPR to generate a CPU exception if needed */
    return cppr;
}
static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx,
                                   bool do_restore)
{
    Xive2Nvp nvp;
    uint8_t ipb;

    /*
     * Grab the associated thread interrupt context registers in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Automatically restore thread context registers */
    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
        do_restore) {
        xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
    }

    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }

    /*
     * Always call xive_tctx_ipb_update(). Even if there were no
     * escalation triggered, there could be a pending interrupt which
     * was saved when the context was pulled and that we need to take
     * into account by recalculating the PIPR (which is not
     * saved/restored).
     * It will also raise the External interrupt signal if needed.
     */
    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}
/*
 * Updating the OS CAM line can trigger a resend of interrupts
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam = value;
    uint32_t qw1w2 = cpu_to_be32(cam);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_restore;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);

    /* First update the thread context */
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
                               do_restore);
    }
}
/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */
int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                         Xive2Eas *eas)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

static int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk,
                               uint32_t eas_idx, uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}

static int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk,
                               uint32_t eas_idx, uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}

int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                         Xive2End *end)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk,
                           uint32_t end_idx, Xive2End *end,
                           uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                         Xive2Nvp *nvp)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
}

int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk,
                           uint32_t nvp_idx, Xive2Nvp *nvp,
                           uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
}

static int xive2_router_get_block_id(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}
/*
 * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
 * width and block id width are configurable at the IC level.
 *
 *    chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
 *    chipid << 24 | 0000 0000 0000 0001 threadid   (8Bit)
 */
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive2_router_get_block_id(xrtr);
    uint8_t tid_shift =
        xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
    uint8_t tid_mask = (1 << tid_shift) - 1;

    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}
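
/*
 * Worked example for the encoding above, in 7-bit thread id mode: a
 * thread with PIR 0x21 encodes as 1 << 7 | (0x21 & 0x7f) = 0xa1,
 * i.e. the marker bit sits just above the thread id as pictured in
 * the comment.
 */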
/*
 * The thread context register words are in big-endian format.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam   = xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}
static void xive2_router_realize(DeviceState *dev, Error **errp)
{
    Xive2Router *xrtr = XIVE2_ROUTER(dev);

    assert(xrtr->xfb);
}
/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
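
/*
 * In the ESB state above, P records that a notification is already in
 * flight and Q records events coalesced behind it, so
 * xive_esb_trigger() only returns true when a new notification
 * actually needs to be sent.
 */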
/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found;
    Xive2Nvp nvp;
    uint8_t nvp_blk;
    uint32_t nvp_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive2_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
    nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);

    /* NVP cache lookup */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
                      nvp_blk, nvp_idx);
        return;
    }

    found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                          xive_get_field32(END2_W6_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVP is dispatched on a HW thread :
     * - specific VP: update the NVP structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive2_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }

        /*
         * Record the IPB in the associated NVP structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
            xive_priority_to_ipb(priority);
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive2_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive2_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field32(END2_W4_END_BLOCK, end.w4),
                            xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
                            xive_get_field32(END2_W5_ESC_END_DATA, end.w5));
}
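
/*
 * Note that the escalation performed above simply re-enters
 * xive2_router_end_notify() with the escalation END parameters, so an
 * escalation END can itself escalate.
 */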
void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field64(EAS2_END_BLOCK, eas.w),
                            xive_get_field64(EAS2_END_INDEX, eas.w),
                            xive_get_field64(EAS2_END_DATA,  eas.w));
}
static Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
    DEFINE_PROP_END_OF_LIST(),
};
static void xive2_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc    = "XIVE2 Router Engine";
    device_class_set_props(dc, xive2_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive2_router_realize;
    xnc->notify = xive2_router_notify;
}
static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}
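
/*
 * The END ESB MMIO accesses below use the address parity computed by
 * addr_is_even() to select the Event State Buffer: the even page of an
 * END's MMIO pair targets ESn (notification) and the odd page targets
 * ESe (escalation).
 */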
static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}
static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router ? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x can not EQ inject on ESe\n",
                          end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}
static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static void xive2_end_source_realize(DeviceState *dev, Error **errp)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive2_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}
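
/*
 * A machine model would typically wire this device up along these
 * lines (sketch under assumed names, not lifted from an actual board
 * file):
 *
 *   object_initialize_child(OBJECT(chip), "end-source", &xsrc,
 *                           TYPE_XIVE2_END_SOURCE);
 *   qdev_prop_set_uint32(DEVICE(&xsrc), "nr-ends", nr_ends);
 *   object_property_set_link(OBJECT(&xsrc), "xive", OBJECT(xrtr),
 *                            &error_fatal);
 *   qdev_realize(DEVICE(&xsrc), NULL, &error_fatal);
 */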
static Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
    DEFINE_PROP_END_OF_LIST(),
};
static void xive2_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE END Source";
    device_class_set_props(dc, xive2_end_source_properties);
    dc->realize = xive2_end_source_realize;
    dc->user_creatable = false;
}
static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};
static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)