/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),  16 },
    [VST_ESB]  = { "ESB",  1,                 16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),  16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),  16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc), 16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc), 16 },

    [VST_IC]   = { "IC",   1, /* ? */         16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", 1, /* ? */         16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     */
    [VST_ERQ]  = { "ERQ",  1,                 VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",               \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

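/*
 * Resolve the address of entry 'idx' in a direct VST: the VSD gives
 * the table base address and its size (VSD_TSIZE encodes a power of
 * two starting at 4K), which bounds the index.
 */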
static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
        return 0;
    }

    return vst_addr + idx * info->size;
}

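/*
 * Resolve an entry in an indirect VST: the primary VSD points to a
 * page of VSDs, each describing one sub-page of entries. Load the VSD
 * covering 'idx' and resolve the remainder as a direct access.
 */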
static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table to cope with different page sizes.
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differ !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

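/*
 * Top-level VST address resolution: check the block id, follow
 * forwarded VSDs to the owning (remote) chip and dispatch to the
 * direct or indirect accessor.
 */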
static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

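/*
 * A word number of XIVE_VST_WORD_ALL writes back the whole entry,
 * otherwise only the selected 4-byte word is updated.
 */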
#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}

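/*
 * ENDC watch engine: the VC_ENDC_WATCH0_* registers hold a cached END
 * entry. A store to DATA0 flushes the DATA registers back to the
 * ENDT, and a load of DATA0 refills them from memory.
 */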
static int pnv_xive2_end_update(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t endc_watch[4];

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] =
            cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_end_cache_load(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(endc_watch[i]);
    }
}

static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

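/*
 * NVP cache watch (PC_NXC_WATCH0_*): same scheme as the END watch,
 * with the NVPT as backing store.
 */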
static int pnv_xive2_nvp_update(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                           xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t nxc_watch[4];

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] =
            cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                           xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE2_GEN1_TIMA_OS;
    }

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
        cfg |= XIVE2_VP_SAVE_RESTORE;
    }

    if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
             xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
        cfg |= XIVE2_THREADID_8BITS;
    }

    return cfg;
}

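/*
 * A HW thread is enabled when its bit is set in TCTXT_EN0 (fused
 * cores 0-7) or TCTXT_EN1, indexed by the low bits of the PIR.
 */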
static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                  nvt_idx, cam_ignore,
                                                  logic_serv);
            }

            /*
             * Save the context and follow on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * Set Translation Tables
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                                     xive->cq_regs[CQ_TAR >> 3], ++entry);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model keeps its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS: /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both PC and VC sub-engines are configured as each use the Virtual
 * Structure Tables.
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal PC register accesses (reads & writes)
 * Page 2: Internal VC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
 * Page 4: Notify Port page (writes only, w/data),
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",        0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",        1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",        2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",     3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",    4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",      6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",       8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};

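/*
 * 'pgoff' and 'pgsize' above are in units of IC pages, so the actual
 * offsets and sizes scale with the IC page size (4K or 64K) selected
 * in the IC BAR register.
 */
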
static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}

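/*
 * BAR programming pattern used below: clearing the valid bit unmaps
 * the region from the system address space, setting it (re)maps the
 * region at the new base. The raw value is always mirrored in
 * cq_regs[] so that the previous valid state can be checked.
 */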
static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

, hwaddr offset
,
936 PnvXive2
*xive
= PNV_XIVE2(opaque
);
938 uint32_t reg
= offset
>> 3;
942 * VSD table settings.
944 case VC_VSD_TABLE_ADDR
:
945 case VC_VSD_TABLE_DATA
:
946 val
= xive
->vc_regs
[reg
];
950 * ESB cache updates (not modeled)
952 case VC_ESBC_FLUSH_CTRL
:
953 xive
->vc_regs
[reg
] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID
;
954 val
= xive
->vc_regs
[reg
];
958 * EAS cache updates (not modeled)
960 case VC_EASC_FLUSH_CTRL
:
961 xive
->vc_regs
[reg
] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID
;
962 val
= xive
->vc_regs
[reg
];
968 case VC_ENDC_WATCH0_SPEC
:
969 xive
->vc_regs
[reg
] &= ~(VC_ENDC_WATCH_FULL
| VC_ENDC_WATCH_CONFLICT
);
970 val
= xive
->vc_regs
[reg
];
973 case VC_ENDC_WATCH0_DATA0
:
975 * Load DATA registers from cache with data requested by the
978 pnv_xive2_end_cache_load(xive
);
979 val
= xive
->vc_regs
[reg
];
982 case VC_ENDC_WATCH0_DATA1
... VC_ENDC_WATCH0_DATA3
:
983 val
= xive
->vc_regs
[reg
];
986 case VC_ENDC_FLUSH_CTRL
:
987 xive
->vc_regs
[reg
] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID
;
988 val
= xive
->vc_regs
[reg
];
992 * Indirect invalidation
994 case VC_AT_MACRO_KILL_MASK
:
995 val
= xive
->vc_regs
[reg
];
998 case VC_AT_MACRO_KILL
:
999 xive
->vc_regs
[reg
] &= ~VC_AT_MACRO_KILL_VALID
;
1000 val
= xive
->vc_regs
[reg
];
1004 * Interrupt fifo overflow in memory backing store (Not modeled)
1006 case VC_QUEUES_CFG_REM0
... VC_QUEUES_CFG_REM6
:
1007 val
= xive
->vc_regs
[reg
];
1013 case VC_ENDC_SYNC_DONE
:
1014 val
= VC_ENDC_SYNC_POLL_DONE
;
1017 xive2_error(xive
, "VC: invalid read @%"HWADDR_PRIx
, offset
);
static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        break;

    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_SPEC:
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {

    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;

    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }
}

static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Redirect XSCOM to MMIO handlers
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x100 ... 0x1FF:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x200 ... 0x2FF:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x300 ... 0x3FF:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Notify port page. The layout is compatible between 4K and 64K pages :
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   IPI interrupt (NPU)
 *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
 */

static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                    addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                        !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* VC: IPI triggers */
    switch (offset) {
    case 0x000 ... 0x7FF:
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    /* VC: HW triggers */
    case 0x800 ... 0xFFF:
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    default:
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}

static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI      0x000
#define PNV_XIVE2_SYNC_HW       0x080
#define PNV_XIVE2_SYNC_NxC      0x100
#define PNV_XIVE2_SYNC_INT      0x180
#define PNV_XIVE2_SYNC_OS_ESC   0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300

static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
 */
static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
{
    /* On P10, the node ID shift in the PIR register is 8 bits */
    return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
}

static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir;
    XiveTCTX *tctx;
    uint64_t val = -1;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        val = xive_tctx_tm_read(NULL, tctx, offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir;
    XiveTCTX *tctx;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        xive_tctx_tm_write(NULL, tctx, offset, val, size);
    }
}

static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Special TIMA offsets to handle accesses in a POWER10 way.
 *
 * Only the CAM line updates done by the hypervisor should be handled
 * specifically.
 */
#define HV_PAGE_OFFSET         (XIVE_TM_HV_PAGE << TM_SHIFT)
#define HV_PUSH_OS_CTX_OFFSET  (HV_PAGE_OFFSET | (TM_QW1_OS + TM_WORD2))
#define HV_PULL_OS_CTX_OFFSET  (HV_PAGE_OFFSET | TM_SPC_PULL_OS_CTX)

static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead ? */
    if (!gen1_tima_os && offset == HV_PUSH_OS_CTX_OFFSET) {
        xive2_tm_push_os_ctx(xptr, tctx, offset, value, size);
        return;
    }

    /* Other TM ops are the same as XIVE1 */
    xive_tctx_tm_write(xptr, tctx, offset, value, size);
}

static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead ? */
    if (!gen1_tima_os && offset == HV_PULL_OS_CTX_OFFSET) {
        return xive2_tm_pull_os_ctx(xptr, tctx, offset, size);
    }

    /* Other TM ops are the same as XIVE1 */
    return xive_tctx_tm_read(xptr, tctx, offset, size);
}

static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * POWER10 default capabilities: 0x2000120076f000FC
 */
#define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC

/*
 * POWER10 default configuration: 0x0030000033000000
 *
 * 8bits thread id was dropped for P10
 */
#define PNV_XIVE2_CONFIGURATION 0x0030000033000000

static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the #Topology of the chip in the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}

/*
 *  Maximum number of IRQs and ENDs supported by HW. Will be tuned by
 *  the firmware.
 */
#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                              pnv_xive2_ic_regions[i].ops, xive,
                              pnv_xive2_ic_regions[i].name,
                              pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }

    /* ESB and END MMIO regions */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}

static Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}

static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
                              int xscom_offset)
{
    const char compat_p10[] = "ibm,power10-xive-x";
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
        cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
    };

    name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
                     sizeof(compat_p10)));
    return 0;
}

static void pnv_xive2_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);

    xdc->dt_xscom = pnv_xive2_dt_xscom;

    dc->desc = "PowerNV XIVE2 Interrupt Controller (POWER10)";
    device_class_set_parent_realize(dc, pnv_xive2_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive2_properties);

    xrc->get_eas = pnv_xive2_get_eas;
    xrc->get_pq = pnv_xive2_get_pq;
    xrc->set_pq = pnv_xive2_set_pq;
    xrc->get_end = pnv_xive2_get_end;
    xrc->write_end = pnv_xive2_write_end;
    xrc->get_nvp = pnv_xive2_get_nvp;
    xrc->write_nvp = pnv_xive2_write_nvp;
    xrc->get_config = pnv_xive2_get_config;
    xrc->get_block_id = pnv_xive2_get_block_id;

    xnc->notify = pnv_xive2_notify;

    xpc->match_nvt = pnv_xive2_match_nvt;
}

static const TypeInfo pnv_xive2_info = {
    .name          = TYPE_PNV_XIVE2,
    .parent        = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init    = pnv_xive2_class_init,
    .class_size    = sizeof(PnvXive2Class),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)

static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
                                     Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x",
                   nvp_idx, eq_blk, eq_idx,
                   xive_get_field32(NVP2_W2_IPB, nvp->w2));
    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        monitor_printf(mon, " CPPR:%02x",
                       xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) {
            monitor_printf(mon, " CO:%04x",
                           xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    monitor_printf(mon, "\n");
}

/*
 * If the table is direct, we can compute the number of PQ entries
 * provisioned by FW.
 */
static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[VST_ESB][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xive);
    uint8_t blk = pnv_xive2_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
    Xive2Eas eas;
    Xive2End end;
    Xive2Nvp nvp;
    int i;
    uint64_t xive_nvp_per_subpage;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    for (i = 0; i < nr_esbs; i++) {
        if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive2_eas_is_masked(&eas)) {
            xive2_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk,
                   0, XIVE2_NVP_COUNT - 1);
    xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
    for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
        while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
            xive2_nvp_pic_print_info(&nvp, i++, mon);
        }
    }
}