/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/irq.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert a priority number to an Interrupt Pending Buffer (IPB)
 * register, which indicates a pending interrupt at the priority
 * corresponding to the bit number
 */
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > XIVE_PRIORITY_MAX ?
        0 : 1 << (XIVE_PRIORITY_MAX - priority);
}

/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification.
 */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}

static void ipb_update(uint8_t *regs, uint8_t priority)
{
    regs[TM_IPB] |= priority_to_ipb(priority);
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
}
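
/*
 * Worked example, assuming XIVE_PRIORITY_MAX is 7 as defined in
 * "hw/ppc/xive_regs.h": an event at priority 2 sets IPB bit
 * 1 << (7 - 2) = 0x20, and ipb_to_pipr(0x20) is then
 * clz32(0x20 << 24) = clz32(0x20000000) = 2, i.e. the PIPR reports
 * priority 2 as the most favored pending notification. An empty IPB
 * yields the least favored priority 0xFF.
 */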
static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0; /* Not supported */
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        g_assert_not_reached();
    }
}
static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop Exception bit */
        regs[TM_NSR] &= ~mask;
    }

    return (nsr << 8) | regs[TM_CPPR];
}
static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}
static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}
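
/*
 * Note on the masking rule above: after xive_tctx_set_cppr(tctx,
 * TM_QW1_OS, 0), no OS interrupt can be presented, since
 * regs[TM_PIPR] < 0 never holds in xive_tctx_notify(), whereas a
 * CPPR of 0xFF accepts all priorities.
 */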
static inline uint32_t xive_tctx_word2(uint8_t *ring)
{
    return *((uint32_t *) &ring[TM_WORD2]);
}
/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */

static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
                                      unsigned size)
{
    uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw2w2;

    qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
    memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
    return qw2w2;
}

static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}
/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw registers values
 *
 * Registers accessibility bits :
 *
 *    0x0 - no access
 *    0x1 - write only
 *    0x2 - read only
 *    0x3 - read/write
 */

static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   3, 3, 3, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   0, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    2, 3, 2, 2,   2, 2, 0, 2,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-0 User */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};
/*
 * Computes a register access mask for a given offset in the TIMA
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & 0x3F;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}
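
/*
 * Worked example, using the tables above: a 4-byte load at OS-page
 * offset TM_QW1_OS (0x10) hits view entries { 2, 3, 2, 2 }, all of
 * which have the read bit set, so the mask is 0xffffffff. A store at
 * the same offset only has the write bit on the TM_CPPR byte
 * (entry 3), so the mask is 0x00ff0000 and the other bytes are
 * filtered out.
 */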
static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    /*
     * Only 4 or 8 bytes stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}
static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    /*
     * Only 4 or 8 bytes loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    /* filter out reserved values */
    return ret & mask;
}
/*
 * The TM context is mapped twice within each page. Stores and loads
 * to the first mapping below 2K write and read the specified values
 * without modification. The second mapping above 2K performs specific
 * state changes (side effects) in addition to setting/returning the
 * interrupt management area context of the processor thread.
 */
static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */
static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
    xive_tctx_notify(tctx, TM_QW1_OS);
}

static uint64_t xive_tm_pull_os_ctx(XiveTCTX *tctx, hwaddr offset,
                                    unsigned size)
{
    uint32_t qw1w2_prev = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2;

    qw1w2 = xive_set_field32(TM_QW1W2_VO, qw1w2_prev, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
    return qw1w2;
}
/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */
typedef struct XiveTmOp {
    uint8_t  page_offset;
    uint32_t op_offset;
    unsigned size;
    void     (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size);
    uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR,       1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR,  1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG,     2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX,    4, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX,    8, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG,     2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX,  4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX,  8, NULL, xive_tm_pull_pool_ctx },
};
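
/*
 * For example, a 1-byte store to TM_QW1_OS + TM_CPPR issued from the
 * OS page, or from the more privileged HV/HW pages, resolves to
 * xive_tm_set_os_cppr() through the lookup in xive_tm_find_op()
 * below, while the same offset on the less privileged User page does
 * not match and falls through to xive_tm_raw_write(), where the
 * User-page access map rejects it.
 */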
static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    int i;

    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];

        /* Accesses done from a more privileged TIMA page are allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }

    return NULL;
}
/*
 * TIMA MMIO handlers
 */
void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                        unsigned size)
{
    const XiveTmOp *xto;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}
uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    const XiveTmOp *xto;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
                          " @%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Finish with raw access to the register values
     */
    return xive_tm_raw_read(tctx, offset, size);
}
static void xive_tm_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    return xive_tctx_tm_read(tctx, offset, size);
}

const MemoryRegionOps xive_tm_ops = {
    .read = xive_tm_read,
    .write = xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR],
                           be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};
void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
    int cpu_index;
    int i;

    /*
     * Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
     * are hot plugged or unplugged.
     */
    if (!tctx) {
        return;
    }

    cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                   " W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);

        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
                       xive_tctx_ring_names[i], s);
        g_free(s);
    }
}
void xive_tctx_reset(XiveTCTX *tctx)
{
    memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}
static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Error *local_err = NULL;

    assert(tctx->cs);

    cpu = POWERPC_CPU(tctx->cs);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT];
        tctx->os_output = env->irq_inputs[POWER9_INPUT_INT];
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_connect(tctx, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
static int vmstate_xive_tctx_pre_save(void *opaque)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        /*
         * Required for hotplugged CPU, for which the state comes
         * after all states of the machine.
         */
        kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}
static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_xive_tctx_pre_save,
    .post_load = vmstate_xive_tctx_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static Property xive_tctx_properties[] = {
    DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
    DEFINE_PROP_END_OF_LIST(),
};
static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->vmsd = &vmstate_xive_tctx;
    dc->props = xive_tctx_properties;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by xive_tctx_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_tctx_info = {
    .name          = TYPE_XIVE_TCTX,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init    = xive_tctx_class_init,
};
Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
    object_unref(obj);
    object_property_set_link(obj, cpu, "cpu", &error_abort);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return obj;

error:
    object_unparent(obj);
    error_propagate(errp, local_err);
    return NULL;
}

void xive_tctx_destroy(XiveTCTX *tctx)
{
    Object *obj = OBJECT(tctx);

    object_unparent(obj);
}
/*
 * XIVE ESB helpers
 */

static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

static bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

static bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}
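
/*
 * Summary of the PQ state machine implemented above (a reading aid
 * derived from the transitions in xive_esb_trigger/xive_esb_eoi):
 *
 *   trigger: RESET (00) -> PENDING (10), event forwarded
 *            PENDING/QUEUED -> QUEUED (11), event coalesced
 *            OFF (01) -> OFF, event discarded
 *
 *   EOI:     RESET/PENDING -> RESET, nothing to replay
 *            QUEUED -> PENDING, the coalesced event is replayed
 *            OFF -> OFF
 */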
/*
 * XIVE Interrupt Source (or IVSE)
 */

uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}
/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}
/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}
/*
 * Forward the source event notification to the Router
 */
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno);
    }
}

/*
 * In a two pages ESB MMIO setting, even page is the trigger page, odd
 * page is for management
 */
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}
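
/*
 * Example, assuming the default "shift" of XIVE_ESB_64K_2PAGE (17,
 * i.e. a pair of 64K pages per interrupt): for IRQ number 2, the
 * trigger page starts at 2 * 0x20000 = 0x40000 and the management
 * page at 0x50000. addr_is_even(addr, 17 - 1) then selects the first
 * (even) 64K page of the pair.
 */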
/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       -1              EOI and return 0|1
 * 0x400 .. 0x7FF       -1              EOI and return 0|1
 * 0x800 .. 0xBFF       -1              return PQ
 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
 */
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    /* In a two pages ESB MMIO setting, trigger page should not be read */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    return ret;
}
/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    /* In a two pages ESB MMIO setting, trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        return;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}
static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
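
/*
 * Illustrative guest-side sequence (an assumption about usage, not
 * something this file defines): with the default two-page setting, a
 * driver EOIs IRQ 'srcno' with an 8-byte load at offset
 * XIVE_ESB_LOAD_EOI of the management page, or, when the
 * XIVE_SRC_STORE_EOI flag is advertised, with a store at
 * XIVE_ESB_STORE_EOI. Both paths funnel into xive_source_esb_eoi().
 */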
void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}
void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
    }
}
static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not clear the LSI bitmap */

    /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
    memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}
static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    assert(xsrc->xive);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    if (!kvm_irqchip_in_kernel()) {
        memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                              &xive_source_esb_ops, xsrc, "xive.esb",
                              (1ull << xsrc->esb_shift) * xsrc->nr_irqs);
    }

    qemu_register_reset(xive_source_reset, dev);
}
static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};
/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI, to be in sync with KVM.
 */
static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
                     XiveNotifier *),
    DEFINE_PROP_END_OF_LIST(),
};
static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE Interrupt Source";
    dc->props   = xive_source_properties;
    dc->realize = xive_source_realize;
    dc->vmsd    = &vmstate_xive_source;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_source_info = {
    .name          = TYPE_XIVE_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init    = xive_source_class_init,
};
/*
 * XiveEND helpers
 */

void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}
void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESn, end->w1);

    monitor_printf(mon, "  %08x %c%c %c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_end_is_valid(end)    ? 'v' : '-',
                   xive_end_is_enqueue(end)  ? 'q' : '-',
                   xive_end_is_notify(end)   ? 'n' : '-',
                   xive_end_is_backlog(end)  ? 'b' : '-',
                   xive_end_is_escalate(end) ? 'e' : '-',
                   xive_end_is_uncond_escalation(end) ? 'u' : '-',
                   xive_end_is_silent_escalation(end) ? 's' : '-',
                   priority, nvt_blk, nvt_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}
static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}
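
/*
 * Queue geometry example: QSIZE 0 gives 1 << (0 + 10) = 1024 4-byte
 * entries, i.e. a 4K page. Each wrap of qindex back to 0 flips the
 * generation bit stored in the top bit of the queue entries, which is
 * how the OS consumer distinguishes new entries from stale ones.
 */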
void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx,
                                 Monitor *mon)
{
    XiveEAS *eas = (XiveEAS *) &end->w4;
    uint8_t pq;

    if (!xive_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESe, end->w1);

    monitor_printf(mon, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_eas_is_valid(eas) ? 'V' : ' ',
                   xive_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t)  xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}
/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_tctx(xrtr, cs);
}
/*
 * Encode the HW CAM line in the block group mode format :
 *
 *   chip << 19 | 0000000 0 0001 thread (7Bit)
 */
static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;

    return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f));
}
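
/*
 * For example, a PIR of 0x235 (chip 2, thread 0x35) yields
 * xive_nvt_cam_line(2, 1 << 7 | 0x35), i.e. block 2 and index 0xB5
 * under the block group mode encoding described above.
 */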
/*
 * The thread context register words are in big-endian format.
 */
static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
                                     uint8_t nvt_blk, uint32_t nvt_idx,
                                     bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }

    return -1;
}
typedef struct XiveTCTXMatch {
    XiveTCTX *tctx;
    uint8_t ring;
} XiveTCTXMatch;

static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
                                 uint8_t nvt_blk, uint32_t nvt_idx,
                                 bool cam_ignore, uint8_t priority,
                                 uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;

    /*
     * TODO (PowerNV): handle chip_id overwrite of block field for
     * hardwired CAM compares
     */

    CPU_FOREACH(cs) {
        XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
        int ring;

        /*
         * Skip partially initialized vCPUs. This can happen when
         * vCPUs are hotplugged.
         */
        if (!tctx) {
            continue;
        }

        /*
         * HW checks that the CPU is enabled in the Physical Thread
         * Enable Register (PTER).
         */

        /*
         * Check the thread context CAM lines and record matches. We
         * will handle CPU exception delivery later
         */
        ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);

        /*
         * Save the context and follow on to catch duplicates, which we
         * don't support yet.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return false;
            }

            match->ring = ring;
            match->tctx = tctx;
        }
    }

    if (!match->tctx) {
        qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
        return false;
    }

    return true;
}
/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. In case
 * of a single NVT notification, the process is abbreviated and the
 * thread is signaled if a match is found. In case of a logical server
 * notification (bits ignored at the end of the NVT identifier), the
 * IVPE and IVRE select a winning thread using different filters. This
 * involves 2 or 3 exchanges on the PowerBus that the model does not
 * support.
 *
 * The parameters represent what is sent on the PowerBus
 */
static bool xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
                                  uint8_t nvt_blk, uint32_t nvt_idx,
                                  bool cam_ignore, uint8_t priority,
                                  uint32_t logic_serv)
{
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    bool found;

    found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
                                 priority, logic_serv, &match);
    if (found) {
        ipb_update(&match.tctx->regs[match.ring], priority);
        xive_tctx_notify(match.tctx, match.ring);
    }

    return found;
}
/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provides further coalescing in the
 * Router.
 */
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as in the function below.
 */
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    XiveNVT nvt;
    bool found;

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_notify(xrtr, format, nvt_blk, nvt_idx,
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread :
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /* Record the IPB in the associated NVT structure */
        ipb_update((uint8_t *) &nvt.w4, priority);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
                           xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
                           xive_get_field32(END_W5_ESC_END_DATA,  end.w5));
}
void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    /*
     * The IVRE checks the State Bit Cache at this point. We skip the
     * SBC lookup because the state bits of the sources are modeled
     * internally in QEMU.
     */

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA,  eas.w));
}
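
/*
 * To summarize the notification path modeled above: a source trigger
 * is routed as LISN -> EAS -> END (queue append + ESn coalescing) ->
 * NVT -> presenter CAM match -> thread IPB/PIPR update -> qemu_irq
 * raise, with an optional escalation loop back into
 * xive_router_end_notify().
 */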
static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc    = "XIVE Router Engine";
    xnc->notify = xive_router_notify;
}

static const TypeInfo xive_router_info = {
    .name          = TYPE_XIVE_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .class_size    = sizeof(XiveRouterClass),
    .class_init    = xive_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { }
    }
};
*eas
, uint32_t lisn
, Monitor
*mon
)
1716 if (!xive_eas_is_valid(eas
)) {
1720 monitor_printf(mon
, " %08x %s end:%02x/%04x data:%08x\n",
1721 lisn
, xive_eas_is_masked(eas
) ? "M" : " ",
1722 (uint8_t) xive_get_field64(EAS_END_BLOCK
, eas
->w
),
1723 (uint32_t) xive_get_field64(EAS_END_INDEX
, eas
->w
),
1724 (uint32_t) xive_get_field64(EAS_END_DATA
, eas
->w
));
/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    end_blk = xsrc->block_id;
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}
/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xrtr = XIVE_ROUTER(obj);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_END_OF_LIST(),
};
static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE END Source";
    dc->props   = xive_end_source_properties;
    dc->realize = xive_end_source_realize;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
    .name          = TYPE_XIVE_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init    = xive_end_source_class_init,
};

/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)