/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"
/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4
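
/*
 * Each source is backed by a 2-bit PQ state pair, so one byte of ESB
 * (SBE) backing store covers 4 sources.
 */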
typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),  16 },
    [VST_ESB]  = { "ESB",  1,                 16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),  16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),  16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc), 16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc), 16 },

    [VST_IC]   = { "IC",   1 /* ? */,         16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", 1 /* ? */,         16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     */
    [VST_ERQ]  = { "ERQ",  1,                 VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",               \
                  (xive)->chip->chip_id, ## __VA_ARGS__);
/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}
/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}
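
/*
 * VSD_TSIZE encodes the page shift minus 12, so the shifts accepted
 * above correspond to TSIZE values 0, 4, 9 and 12.
 */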
static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differ !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
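
/*
 * Worked example: with 64K indirect pages (page_shift = 16) and
 * 32-byte END entries, vst_per_page = 2048. Looking up entry 5000
 * reads the VSD at index 5000 / 2048 = 2 and then performs a direct
 * lookup with the remainder 5000 % 2048 = 904 within that page.
 */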
static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}
#define XIVE_VST_WORD_ALL -1
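
/*
 * VST entries are arrays of big-endian 32-bit words: 'word_number'
 * selects a single 4-byte word to write back, while XIVE_VST_WORD_ALL
 * updates the whole entry.
 */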
static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}
static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}
static int pnv_xive2_end_update(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t endc_watch[4];

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] =
            cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_end_cache_load(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                           xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(endc_watch[i]);
    }
}
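
/*
 * The ENDC watch engine above gives FW a cache-coherent view of the
 * ENDT: FW selects a block/index in VC_ENDC_WATCH0_SPEC, a read of
 * DATA0 loads the entry into the DATA0-3 registers, and a write to
 * DATA0 commits the (possibly modified) entry back to the VST. The
 * NXC watch engine below follows the same protocol for NVP entries.
 */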
static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

static int pnv_xive2_nvp_update(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                           xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t nxc_watch[4];

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] =
            cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
{
    uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                           xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(nxc_watch[i]);
    }
}
static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE2_GEN1_TIMA_OS;
    }

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
        cfg |= XIVE2_VP_SAVE_RESTORE;
    }

    if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
                 xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
        cfg |= XIVE2_THREADID_8BITS;
    }

    return cfg;
}
static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}
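
/*
 * TCTXT_EN0 holds the enable bits of the first 64 HW threads (fused
 * cores 0-7) and TCTXT_EN1 those of the next 64; 'pir & 0x3f' selects
 * the thread bit within the register.
 */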
static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                  nvt_idx, cam_ignore,
                                                  logic_serv);
            }

            /*
             * Save the context and follow on to catch duplicates,
             * that we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}
static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
    }

    return cfg;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}
/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}
/*
 * Set Translation Tables
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                                              xive->cq_regs[CQ_TAR >> 3],
                                              ++entry);
    }

    return 0;
}
/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS: /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}
/*
 * Both PC and VC sub-engines are configured as each use the Virtual
 * Structure Tables
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal PC register accesses (reads & writes)
 * Page 2: Internal VC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
 * Page 4: Notify Port page (writes only, w/data),
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",          0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",          1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",          2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",       3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",      4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",        6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",         8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};
static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}
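
/*
 * CQ_BAR_RANGE encodes log2(size) - 24: 0 -> 16M, 4 -> 256M,
 * 16 -> 1T.
 */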
static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}
static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    case VC_ESBC_CFG:
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_end_cache_load(xive);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;

    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}
static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update is not modeled */
        break;

    case VC_ESBC_CFG:
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update is not modeled */
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}
static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}
static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {

    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}
static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    case TCTXT_CFG:
        val = xive->tctxt_regs[reg];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        xive->tctxt_regs[reg] = val;
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;
    case TCTXT_CFG:
        xive->tctxt_regs[reg] = val;
        break;
    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }
}
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Redirect XSCOM to MMIO handlers
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x100 ... 0x1FF:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x200 ... 0x2FF:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x300 ... 0x3FF:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}
static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Notify port page. The layout is compatible between 4K and 64K pages :
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   IPI interrupt (NPU)
 *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
 */

static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                    addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                        !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* VC: IPI triggers */
    switch (offset) {
    case 0x000 ... 0x7FF:
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    /* VC: HW triggers */
    case 0x800 ... 0xFFF:
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    default:
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}
static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI      0x000
#define PNV_XIVE2_SYNC_HW       0x080
#define PNV_XIVE2_SYNC_NxC      0x100
#define PNV_XIVE2_SYNC_INT      0x180
#define PNV_XIVE2_SYNC_OS_ESC   0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300
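
/*
 * Writes to the sync page are dataless: the offset alone, in 0x80
 * steps, selects which sync operation to perform.
 */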
static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}
static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
 */
static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
{
    /* On P10, the node ID shift in the PIR register is 8 bits */
    return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
}

static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
                                                   hwaddr offset)
{
    /*
     * Indirect TIMA accesses are similar to direct accesses for
     * privilege ring 0. So remove any traces of the hw thread ID from
     * the offset in the IC BAR as it could be interpreted as the ring
     * privilege when calling the underlying direct access functions.
     */
    return offset & ((1ull << xive->ic_shift) - 1);
}
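
/*
 * Example: with 64K pages (ic_shift = 16) on chip 2, an access to
 * indirect TM page 3 yields PIR (2 << 8) | 3 = 0x203.
 */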
static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    hwaddr hw_page_offset;
    uint32_t pir;
    XiveTCTX *tctx;
    uint64_t val = -1;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        val = xive_tctx_tm_read(xptr, tctx, hw_page_offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    hwaddr hw_page_offset;
    uint32_t pir;
    XiveTCTX *tctx;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        xive_tctx_tm_write(xptr, tctx, hw_page_offset, val, size);
    }
}
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
/*
 * TIMA ops
 */
static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);

    xive_tctx_tm_write(xptr, tctx, offset, value, size);
}

static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);

    return xive_tctx_tm_read(xptr, tctx, offset, size);
}
static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * POWER10 default capabilities: 0x2000120076f000FC
 */
#define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC

/*
 * POWER10 default configuration: 0x0030000033000000
 *
 * 8bits thread id was dropped for P10
 */
#define PNV_XIVE2_CONFIGURATION 0x0030000033000000
static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the #Topology of the chip in the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}
/*
 *  Maximum number of IRQs and ENDs supported by HW. Will be tuned by
 *  FW.
 */
#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
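
/*
 * Each interrupt source uses two 64K ESB pages (trigger + management),
 * hence the division by 1ull << XIVE_ESB_64K_2PAGE.
 */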
static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                              pnv_xive2_ic_regions[i].ops, xive,
                              pnv_xive2_ic_regions[i].name,
                              pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }

    /*
     * VC MMIO regions.
     */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}
static Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}

static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
                              int xscom_offset)
{
    const char compat_p10[] = "ibm,power10-xive-x";
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
        cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
    };

    name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
                     sizeof(compat_p10)));
    return 0;
}
static void pnv_xive2_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);

    xdc->dt_xscom = pnv_xive2_dt_xscom;

    dc->desc = "PowerNV XIVE2 Interrupt Controller (POWER10)";
    device_class_set_parent_realize(dc, pnv_xive2_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive2_properties);

    xrc->get_eas = pnv_xive2_get_eas;
    xrc->get_pq = pnv_xive2_get_pq;
    xrc->set_pq = pnv_xive2_set_pq;
    xrc->get_end = pnv_xive2_get_end;
    xrc->write_end = pnv_xive2_write_end;
    xrc->get_nvp = pnv_xive2_get_nvp;
    xrc->write_nvp = pnv_xive2_write_nvp;
    xrc->get_config = pnv_xive2_get_config;
    xrc->get_block_id = pnv_xive2_get_block_id;

    xnc->notify = pnv_xive2_notify;

    xpc->match_nvt = pnv_xive2_match_nvt;
    xpc->get_config = pnv_xive2_presenter_get_config;
}

static const TypeInfo pnv_xive2_info = {
    .name          = TYPE_PNV_XIVE2,
    .parent        = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init    = pnv_xive2_class_init,
    .class_size    = sizeof(PnvXive2Class),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)
static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
                                     Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    monitor_printf(mon, " %08x end:%02x/%04x IPB:%02x",
                   nvp_idx, eq_blk, eq_idx,
                   xive_get_field32(NVP2_W2_IPB, nvp->w2));
    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        monitor_printf(mon, " CPPR:%02x",
                       xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) {
            monitor_printf(mon, " CO:%04x",
                           xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    monitor_printf(mon, "\n");
}
/*
 * If the table is direct, we can compute the number of PQ entries
 * provisioned by FW.
 */
static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[VST_ESB][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
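
/*
 * A direct ESB table of vst_tsize bytes thus backs
 * vst_tsize * SBE_PER_BYTE sources, 4 PQ pairs per byte.
 */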
/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}
void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xive);
    uint8_t blk = pnv_xive2_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
    Xive2Eas eas;
    Xive2End end;
    Xive2Nvp nvp;
    int i;
    uint64_t xive_nvp_per_subpage;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    for (i = 0; i < nr_esbs; i++) {
        if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive2_eas_is_masked(&eas)) {
            xive2_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk,
                   0, XIVE2_NVP_COUNT - 1);
    xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
    for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
        while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
            xive2_nvp_pic_print_info(&nvp, i++, mon);
        }
    }
}