/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),  16 },
    [VST_ESB]  = { "ESB",  1,                 16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),  16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),  16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc), 16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc), 16 },

    [VST_IC]   = { "IC",   1, /* ? */         16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", 1, /* ? */         16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     */
    [VST_ERQ]  = { "ERQ",  1,                 VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",               \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
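
/*
 * Example: with mask 0x0000ff00ull and word 0x1234abcdull, ctz64(mask)
 * is 8, so GETFIELD(mask, word) returns 0xab and
 * SETFIELD(mask, word, 0x56) returns 0x123456cdull.
 */
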
/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}
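
/*
 * Note: per the logic above, the block id defaults to the chip id and
 * is only taken from the CQ_XIVE_CFG_HYP_HARD_BLOCK_ID field when
 * firmware has set CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE.
 */
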
/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table.
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differ !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
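
/*
 * Sizing example for the indirect scheme above: with 64K indirect
 * pages (page_shift = 16) and 32-byte END entries, vst_per_page is
 * 65536 / 32 = 2048, so END index 5000 resolves to VSD #2
 * (5000 / 2048) and to entry 904 (5000 % 2048) of the page that VSD
 * points to.
 */
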
static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}
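
/*
 * A word_number of N rewrites only the Nth 32-bit word of the entry
 * (bytes 4 * N .. 4 * N + 3) in guest memory, while XIVE_VST_WORD_ALL
 * rewrites the whole entry.
 */
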
static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}

static int pnv_xive2_end_update(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t endc_watch[4];

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] =
            cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}
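
/*
 * The four VC_ENDC_WATCH0_DATA registers hold the 32-byte END image
 * being written back; the loop above only byte-swaps them into the
 * endc_watch[] buffer before the store to guest memory.
 */
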
static void pnv_xive2_end_cache_load(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(endc_watch[i]);
    }
}

static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

static int pnv_xive2_nvp_update(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                           xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t nxc_watch[4];

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] =
            cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                           xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}
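
/*
 * Each TCTXT_ENx register holds one enable bit per thread: TCTXT_EN0
 * covers the first 8 fused cores and TCTXT_EN1 the others, the bit
 * index being the low 6 bits of the PIR (64 threads per register).
 */
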
static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                  nvt_idx, cam_ignore,
                                                  logic_serv);
            }

            /*
             * Save the context and follow on to catch duplicates,
             * that we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * Set Translation Tables
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                                              xive->cq_regs[CQ_TAR >> 3],
                                              ++entry);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS: /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both PC and VC sub-engines are configured as each use the Virtual
 * Structure Tables.
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
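
/*
 * In the switch above, VSD_MODE_FORWARD only records the VSD so that
 * pnv_xive2_vst_addr() redirects later lookups to the owning chip,
 * while VSD_MODE_EXCLUSIVE configures the local backing store.
 */
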
/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal PC register accesses (reads & writes)
 * Page 2: Internal VC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
 * Page 4: Notify Port page (writes only, w/data),
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",          0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",          1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",          2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",       3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",      4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",        6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",         8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};

static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}
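
/*
 * CQ_BAR_RANGE encodes a power-of-two window size starting at 16MB:
 * a range field of 0 yields 1ull << 24 (16MB), a range field of 4
 * yields 1ull << 28 (256MB).
 */
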
static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_end_cache_load(xive);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;

    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    /*
     * NVP cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    /*
     * NVP cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;

    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->tctxt_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Redirect XSCOM to MMIO handlers
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x100 ... 0x1FF:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x200 ... 0x2FF:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x300 ... 0x3FF:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Notify port page. The layout is compatible between 4K and 64K pages :
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   IPI interrupt (NPU)
 *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
 */

static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                    addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                        !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    /* VC: IPI triggers */
    case 0x000 ... 0x7FF:
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    /* VC: HW triggers */
    case 0x800 ... 0xFFF:
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    default:
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}

static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI      0x000
#define PNV_XIVE2_SYNC_HW       0x080
#define PNV_XIVE2_SYNC_NxC      0x100
#define PNV_XIVE2_SYNC_INT      0x180
#define PNV_XIVE2_SYNC_OS_ESC   0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300

static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        /* Sync operations are not modeled */
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
 */
static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    uint64_t val = -1;

    if (tctx) {
        val = xive_tctx_tm_read(NULL, tctx, offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);

    if (tctx) {
        xive_tctx_tm_write(NULL, tctx, offset, val, size);
    }
}

static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Special TIMA offsets to handle accesses in a POWER10 way.
 *
 * Only the CAM line updates done by the hypervisor should be handled
 * specifically.
 */
#define HV_PAGE_OFFSET         (XIVE_TM_HV_PAGE << TM_SHIFT)
#define HV_PUSH_OS_CTX_OFFSET  (HV_PAGE_OFFSET | (TM_QW1_OS + TM_WORD2))
#define HV_PULL_OS_CTX_OFFSET  (HV_PAGE_OFFSET | TM_SPC_PULL_OS_CTX)
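
/*
 * For illustration, assuming the usual TM_SHIFT of 16 and an
 * XIVE_TM_HV_PAGE index of 3, HV_PAGE_OFFSET is 0x30000 and
 * HV_PUSH_OS_CTX_OFFSET selects word 2 of the OS cache line within
 * the HV page.
 */
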
static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead ? */
    if (!gen1_tima_os && offset == HV_PUSH_OS_CTX_OFFSET) {
        xive2_tm_push_os_ctx(xptr, tctx, offset, value, size);
        return;
    }

    /* Other TM ops are the same as XIVE1 */
    xive_tctx_tm_write(xptr, tctx, offset, value, size);
}

static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead ? */
    if (!gen1_tima_os && offset == HV_PULL_OS_CTX_OFFSET) {
        return xive2_tm_pull_os_ctx(xptr, tctx, offset, size);
    }

    /* Other TM ops are the same as XIVE1 */
    return xive_tctx_tm_read(xptr, tctx, offset, size);
}

static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * POWER10 default capabilities: 0x2000120076f000FC
 */
#define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC

/*
 * POWER10 default configuration: 0x0030000033000000
 *
 * The 8-bit thread id was dropped for P10
 */
#define PNV_XIVE2_CONFIGURATION 0x0030000033000000

static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the #Topology of the chip in the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}

/*
 *  Maximum number of IRQs and ENDs supported by HW. Will be tuned by
 *  firmware.
 */
#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
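
/*
 * Each ESB/END entry is backed by two 64K MMIO pages
 * (1ull << XIVE_ESB_64K_2PAGE, i.e. 128K), so the counts above are
 * the default BAR sizes divided by 128K.
 */
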
static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                              pnv_xive2_ic_regions[i].ops, xive,
                              pnv_xive2_ic_regions[i].name,
                              pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }

    /* ESB and END MMIO regions */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}

static Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}

static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
                              int xscom_offset)
{
    const char compat_p10[] = "ibm,power10-xive-x";
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
        cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
    };

    name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
                     sizeof(compat_p10)));
    return 0;
}

static void pnv_xive2_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);

    xdc->dt_xscom = pnv_xive2_dt_xscom;

    dc->desc = "PowerNV XIVE2 Interrupt Controller (POWER10)";
    device_class_set_parent_realize(dc, pnv_xive2_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive2_properties);

    xrc->get_eas = pnv_xive2_get_eas;
    xrc->get_pq = pnv_xive2_get_pq;
    xrc->set_pq = pnv_xive2_set_pq;
    xrc->get_end = pnv_xive2_get_end;
    xrc->write_end = pnv_xive2_write_end;
    xrc->get_nvp = pnv_xive2_get_nvp;
    xrc->write_nvp = pnv_xive2_write_nvp;
    xrc->get_block_id = pnv_xive2_get_block_id;

    xnc->notify = pnv_xive2_notify;

    xpc->match_nvt = pnv_xive2_match_nvt;
}

static const TypeInfo pnv_xive2_info = {
    .name          = TYPE_PNV_XIVE2,
    .parent        = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init    = pnv_xive2_class_init,
    .class_size    = sizeof(PnvXive2Class),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)

static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
                                     Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n",
                   nvp_idx, eq_blk, eq_idx,
                   xive_get_field32(NVP2_W2_IPB, nvp->w2));
}

/*
 * If the table is direct, we can compute the number of PQ entries
 * provisioned by FW.
 */
static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[VST_ESB][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
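
/*
 * SBE_PER_BYTE is 4 because each source uses a 2-bit PQ pair in the
 * backing store: one byte of ESB state covers four interrupts.
 */
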
/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xive);
    uint8_t blk = pnv_xive2_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
    Xive2Eas eas;
    Xive2End end;
    Xive2Nvp nvp;
    int i;
    uint64_t xive_nvp_per_subpage;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    for (i = 0; i < nr_esbs; i++) {
        if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive2_eas_is_masked(&eas)) {
            xive2_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk,
                   0, XIVE2_NVP_COUNT - 1);
    xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
    for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
        while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
            xive2_nvp_pic_print_info(&nvp, i++, mon);
        }
    }
}