/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/ppc.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled) :
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);
/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
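/*
 * Illustration only (not used by the model): for a contiguous mask such
 * as VSD_TSIZE, these helpers behave like the generic bitops, i.e.
 *
 *   GETFIELD(mask, word)        == extract64(word, ctz64(mask), ctpop64(mask))
 *   SETFIELD(mask, word, value) == deposit64(word, ctz64(mask),
 *                                            ctpop64(mask), value)
 *
 * which is the conversion the TODO above refers to.
 */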
/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_ic(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (xive->chip->chip_id == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_size(uint64_t vsd)
{
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    /*
     * Read the first descriptor to get the page size of the indirect
     * table.
     */
    if (VSD_INDIRECT & vsd) {
        uint32_t nr_pages = vst_tsize / XIVE_VSD_SIZE;
        uint32_t page_shift;

        vsd = ldq_be_dma(&address_space_memory, vsd & VSD_ADDRESS_MASK);
        page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            return 0;
        }

        return nr_pages * (1ull << page_shift);
    }

    return vst_tsize;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differ !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
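/*
 * Worked example (sketch, values assumed): with 64K indirect pages
 * (page_shift = 16) and 32-byte NVT entries, vst_per_page is 2048.
 * A lookup of idx = 5000 loads VSD #2 of the indirect array
 * (5000 / 2048) and then applies the direct formula within that page
 * using idx % 2048 = 904.
 */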
static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;
    uint32_t idx_max;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_ic(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    idx_max = pnv_xive_vst_size(vsd) / info->size - 1;
    if (idx > idx_max) {
        xive_error(xive, "VST: %s entry %x/%x out of range [ 0 .. %x ] !?",
                   info->name, blk, idx, idx_max);
        return 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_get_ic(blk) != xive) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_SRCNO(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}
static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    PnvXive *xive = NULL;
    CPUPPCState *env = &cpu->env;
    int pir = env->spr_cb[SPR_PIR].default_value;

    /*
     * Perform an extra check on the HW thread enablement.
     *
     * The TIMA is shared among the chips and to identify the chip
     * from which the access is being done, we extract the chip id
     * from the PIR.
     */
    xive = pnv_xive_get_ic((pir >> 8) & 0xf);
    if (!xive) {
        return NULL;
    }

    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
    }

    return tctx;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = xive->chip->chip_id;

    xive_router_notify(xn, XIVE_SRCNO(blk, srcno));
}
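/*
 * Note (sketch, encoding assumed): XIVE_SRCNO() packs the 4-bit block
 * id in the top nibble of the 32-bit global interrupt number (see
 * hw/ppc/xive_regs.h), so an IPI with local number 0x10 on chip/block 1
 * would reach the router as 0x10000010. The exact bit layout is defined
 * by that header, not by this file.
 */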
/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;

    return pnv_xive_vst_size(xive->vsds[VST_TSEL_SBE][blk]) * SBE_PER_BYTE;
}

static uint32_t pnv_xive_nr_ends(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;

    return pnv_xive_vst_size(xive->vsds[VST_TSEL_EQDT][blk])
        / vst_infos[VST_TSEL_EQDT].size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END. Compute the number of ENDs
         * provisioned by FW and resize the END ESB window accordingly.
         */
        memory_region_set_size(&end_xsrc->esb_mmio, pnv_xive_nr_ends(xive) *
                               (1ull << (end_xsrc->esb_shift + 1)));
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own. Compute the number of IRQs provisioned
         * by FW and resize the IPI ESB window accordingly.
         */
        memory_region_set_size(&xsrc->esb_mmio, pnv_xive_nr_ipis(xive) *
                               (1ull << xsrc->esb_shift));
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured here since each uses
 * the Virtual Structure Tables : SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */
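/*
 * Example (sketch, 64K pages assumed, ic_shift = 16): the register page
 * sits at ic_base, the notify page at ic_base + 0x10000, the LSI pages
 * at ic_base + 0x20000 and the indirect TIMA pages at ic_base + 0x40000,
 * matching the subregion offsets mapped below when CQ_IC_BAR is written.
 */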
/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         *
         * PC_TCTXT_CFG_BLKGRP_EN
         * PC_TCTXT_CFG_HARD_CHIPID_BLK :
         *   Moves the chipid into block field for hardwired CAM compares.
         *   Block offset value is adjusted to 0b0..01 & ThrdId
         *
         *   Will require changes in xive_presenter_tctx_match(). I am
         *   not sure how to handle that yet.
         */

        /* Overrides hardwired chip ID with the chip ID field */
        if (val & PC_TCTXT_CHIPID_OVERRIDE) {
            xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
        }
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}
static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BARM:
    case CQ_PC_BAR:
    case CQ_VC_BARM:
    case CQ_VC_BAR:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:

    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI     0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW      0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC  0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC  0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS   0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5       0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6       0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7       0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI        0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW         0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC     0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC     0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS      0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL       0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH       0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC        0xf80 /* Sync remove VPC store */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    xive_router_notify(XIVE_NOTIFIER(xive), val);
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id
 * (PIR) has to be configured in the IC registers before. This is used
 * for resets and for debug purpose also.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_SRCNO(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive);
    uint32_t nr_ends = pnv_xive_nr_ends(xive);
    XiveEAS eas;
    XiveEND end;
    int i;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] ENDT %08x .. %08x\n", blk, 0, nr_ends - 1);
    for (i = 0; i < nr_ends; i++) {
        if (xive_router_get_end(xrtr, blk, i, &end)) {
            break;
        }
        xive_end_pic_print_info(&end, i, mon);
    }
}
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overridden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}

/*
 *  Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    Object *obj;

    obj = object_property_get_link(OBJECT(dev), "chip", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'chip' not found: ");
        return;
    }

    /* The PnvChip id identifies the XIVE interrupt controller. */
    xive->chip = PNV_CHIP(obj);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    dc->realize = pnv_xive_realize;
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_tctx = pnv_xive_get_tctx;

    xnc->notify = pnv_xive_notify;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)