/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"
/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),  16 },
    [VST_ESB]  = { "ESB",  1,                 16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),  16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),  16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc), 16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc), 16 },

    [VST_IC]   = { "IC",   1, /* ? */         16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", 1, /* ? */         16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     */
    [VST_ERQ]  = { "ERQ",  1,                 VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);
/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
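
/*
 * For example, with mask 0x00F0 (ctz64(mask) == 4):
 *   GETFIELD(0x00F0, 0x0150)      == 0x5
 *   SETFIELD(0x00F0, 0x0100, 0x5) == 0x0150
 * i.e. the mask both selects the field and fixes its bit position.
 */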
/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}
/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
        return 0;
    }

    return vst_addr + idx * info->size;
}
static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differ !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
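
/*
 * As an illustration: an indirect ENDT using 64K subpages holds
 * 65536 / 32 = 2048 of the 32-byte ENDs per subpage, so idx 5000
 * selects VSD #2 of the indirect table (5000 / 2048) and entry
 * 5000 % 2048 = 904 within the subpage it points to.
 */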
static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}
static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}
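
/*
 * VST entries are arrays of big-endian 32-bit words. Callers pass
 * XIVE_VST_WORD_ALL to write back a full entry, or a word index to
 * update a single 4-byte word in place, as the router does when it
 * only touches one word of an END or NVP.
 */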
static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}

static int pnv_xive2_end_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4];
    int i;

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] =
            cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_end_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(endc_watch[i]);
    }
}
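
/*
 * The ENDC watch engine thus works in two halves, as wired up in the
 * VC MMIO handlers below: a load of VC_ENDC_WATCH0_DATA0 pulls the
 * selected END into the four DATA registers (cache load above), and a
 * store to DATA0 pushes the four DATA registers back out to the END
 * table in RAM (update above).
 */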
static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

static int pnv_xive2_nvp_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4];
    int i;

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] =
            cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}
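
/*
 * Each enablement register covers 64 threads. Assuming the usual
 * PNV10_PIR2FUSEDCORE(pir) = (pir >> 3) & 0xf derivation, fused cores
 * 0-7 (the chip's first 64 threads) are controlled by TCTXT_EN0 and
 * fused cores 8-15 by TCTXT_EN1, with 'pir & 0x3f' picking the thread
 * bit within the selected register.
 */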
static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                              nvt_idx, cam_ignore,
                                              logic_serv);

            /*
             * Save the context and follow on to catch duplicates,
             * that we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno));
}
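
/*
 * XIVE_EAS() (see xive_regs.h) places the block id in the top nibble
 * of the global interrupt number: source 0x10 on block 2 becomes
 * EAS 0x20000010.
 */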
/*
 * Set Translation Tables
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                   xive->cq_regs[CQ_TAR >> 3], ++entry);
    }

    return 0;
}
/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS: /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}
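
/*
 * Note on the ESB sizing above: the SBE backing store holds 2 PQ bits
 * per interrupt, hence SBE_PER_BYTE = 4 interrupts per byte of
 * backing store, and the model exposes an ESB window of
 * (1ull << esb_shift) bytes for each of them.
 */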
/*
 * Both PC and VC sub-engines are configured as each use the Virtual
 * Structure Tables
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal VC register accesses (reads & writes)
 * Page 2: Internal PC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (reads & writes)
 * Page 4: Notify Port page (writes only, w/data),
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",          0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",          1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",          2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",       3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",      4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",        6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",         8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};
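
/*
 * With 64K pages (ic_shift = 16), the table above places the sync
 * pages at offset 6 << 16 into the IC BAR and the indirect TIMA at
 * 256 << 16 = 16M, spanning 128 pages (8M), one page per HW thread.
 */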
static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}
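
/*
 * CQ_BAR_RANGE encodes log2(size) - 24: a range field of 0 selects a
 * 16M window, 2 selects 64M and 8 selects 4G.
 */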
static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * PC engine
         */
        pnv_xive2_end_cache_load(xive);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;

    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    /*
     * NVP cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * VC engine
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    /*
     * NVP cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;

    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->tctxt_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Redirect XSCOM to MMIO handlers
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x100 ... 0x1FF:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x200 ... 0x2FF:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x300 ... 0x3FF:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}
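
/*
 * Each sub-engine thus owns a 256-register XSCOM window. A read of
 * xscom_reg 0x245, for instance, is serviced by the PC handler at
 * MMIO offset (0x45 << 3) = 0x228.
 */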
static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Notify port page. The layout is compatible between 4K and 64K pages:
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   IPI interrupt (NPU)
 *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
 */

static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                    addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}

static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* VC: IPI triggers */
    switch (offset) {
    case 0x000 ... 0x7FF:
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    /* VC: HW triggers */
    case 0x800 ... 0xFFF:
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    default:
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}

static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI      0x000
#define PNV_XIVE2_SYNC_HW       0x080
#define PNV_XIVE2_SYNC_NxC      0x100
#define PNV_XIVE2_SYNC_INT      0x180
#define PNV_XIVE2_SYNC_OS_ESC   0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300

static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
 */
static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    uint64_t val = -1;

    if (tctx) {
        val = xive_tctx_tm_read(NULL, tctx, offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);

    if (tctx) {
        xive_tctx_tm_write(NULL, tctx, offset, val, size);
    }
}

static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * TIMA ops
 */
static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    /* Other TM ops are the same as XIVE1 */
    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    /* Other TM ops are the same as XIVE1 */
    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * POWER10 default capabilities: 0x2000120076f00000
 */
#define PNV_XIVE2_CAPABILITIES  0x2000120076f00000

/*
 * POWER10 default configuration: 0x0030000033000000
 *
 * The 8-bit thread id was dropped for P10
 */
#define PNV_XIVE2_CONFIGURATION 0x0030000033000000
static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the #Topology of the chip in the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}
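
/*
 * Note that the block field hardwired above always resolves to the
 * chip id right after reset: pnv_xive2_block_id() returns the chip id
 * directly unless CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE is set, in
 * which case it reads back the field that was just set to the same
 * value.
 */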
/*
 * Maximum number of IRQs and ENDs supported by HW. Will be tuned by
 * software.
 */
#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                         pnv_xive2_ic_regions[i].ops, xive,
                         pnv_xive2_ic_regions[i].name,
                         pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }

    /* VC MMIO regions */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}
static Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}

static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
                              int xscom_offset)
{
    const char compat_p10[] = "ibm,power10-xive-x";
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
        cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
    };

    name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
                     sizeof(compat_p10)));
    return 0;
}

static void pnv_xive2_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);

    xdc->dt_xscom = pnv_xive2_dt_xscom;

    dc->desc = "PowerNV XIVE2 Interrupt Controller (POWER10)";
    device_class_set_parent_realize(dc, pnv_xive2_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive2_properties);

    xrc->get_eas = pnv_xive2_get_eas;
    xrc->get_end = pnv_xive2_get_end;
    xrc->write_end = pnv_xive2_write_end;
    xrc->get_nvp = pnv_xive2_get_nvp;
    xrc->write_nvp = pnv_xive2_write_nvp;
    xrc->get_block_id = pnv_xive2_get_block_id;

    xnc->notify = pnv_xive2_notify;

    xpc->match_nvt = pnv_xive2_match_nvt;
}

static const TypeInfo pnv_xive2_info = {
    .name          = TYPE_PNV_XIVE2,
    .parent        = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init    = pnv_xive2_class_init,
    .class_size    = sizeof(PnvXive2Class),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)
static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
                                     Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n",
                   nvp_idx, eq_blk, eq_idx,
                   xive_get_field32(NVP2_W2_IPB, nvp->w2));
}

/*
 * If the table is direct, we can compute the number of PQ entries
 * provisioned by FW.
 */
static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[VST_ESB][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
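
/*
 * For example, a direct 64K ESB table (a VSD_TSIZE field of 4) covers
 * 65536 * SBE_PER_BYTE = 262144 PQ entries, i.e. interrupt sources.
 */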
/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xive);
    uint8_t blk = pnv_xive2_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
    Xive2Eas eas;
    Xive2End end;
    Xive2Nvp nvp;
    int i;
    uint64_t xive_nvp_per_subpage;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    for (i = 0; i < nr_esbs; i++) {
        if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive2_eas_is_masked(&eas)) {
            xive2_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk,
                   0, XIVE2_NVP_COUNT - 1);
    xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
    for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
        while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
            xive2_nvp_pic_print_info(&nvp, i++, mon);
        }
    }
}