/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG
/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4
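/*
 * Each source is tracked by a 2-bit PQ state, hence the 4 SBE entries
 * per byte of backing store.
 */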
typedef struct XiveVstInfo {
    const char *name;
    uint32_t size;
    uint32_t max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled) :
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);
/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}
/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}
static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}
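/*
 * Indirect lookup example: with 64K indirect pages (page_shift = 16)
 * and 32-byte END entries, vst_per_page is 2048. END #5000 is then
 * reached through VSD #2 (5000 / 2048), at index 904 (5000 % 2048) of
 * the page it points to.
 */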
static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
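/*
 * A VSD in forward mode delegates the lookup to the XIVE IC of
 * another chip: the block id selects the remote controller and the
 * same lookup is replayed there.
 */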
static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}
static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1
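/*
 * VST writes update either the full entry or a single 4-byte word of
 * it. XIVE_VST_WORD_ALL selects the full write back.
 */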
static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}
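/*
 * The cache watch facility (VC_EQC_CWATCH_* and PC_VPC_CWATCH_*
 * registers) lets FW inspect and update a cached END or NVT entry:
 * the SPEC register selects the block/index, the DAT registers carry
 * the data, and a write to DAT0 flushes the entry back to the VST in
 * RAM.
 */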
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}
static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}
/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 (normal) of the chip or 0-7 (fused). The second register
 * covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}
static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}
static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr)
{
    uint32_t cfg = 0;

    /* TIMA GEN1 is all P9 knows */
    cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;

    return cfg;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}
/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}
/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}
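/*
 * The number of IPIs is deduced from the size of the SBE backing
 * store: each byte of PQ state covers SBE_PER_BYTE sources. Indirect
 * SBE tables are not supported, hence the 0 in that case.
 */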
static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}
/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
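/*
 * For instance, with a 64M VC region and 16 EDT sets of 4M each, if
 * sets 0-1 are IPI and set 2 is EQ, a VC access at offset 0x900000
 * (set 2) remaps to END ESB offset 0x100000 once the two leading IPI
 * sets are subtracted.
 */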
static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}
/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}
/*
 * Both PC and VC sub-engines are configured, as each uses the Virtual
 * Structure Tables : SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */
/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has
         * removed support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}
static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}
static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI    0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW     0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS  0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5      0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6      0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7      0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI       0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW        0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC    0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC    0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS     0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL      0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH      0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC       0xf80 /* Sync remove VPC store */
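/* Each special op above owns one 128-byte cache line of the notify page. */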
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                       !!(val & XIVE_TRIGGER_PQ));
}
static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}
static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and also for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
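/*
 * The direct TIMA pages are shared by all CPUs of the machine, so the
 * target thread context is resolved from the CPU doing the access
 * (current_cpu) rather than from the MMIO address.
 */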
static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}
static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    Monitor *mon)
{
    uint8_t eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
                   eq_blk, eq_idx,
                   xive_get_field32(NVT_W4_IPB, nvt->w4));
}
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;

    monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
                   0, XIVE_NVT_COUNT - 1);
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, mon);
        }
    }
}
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);
}
/*
 *  Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
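/*
 * Each ESB is backed by two 64K pages (trigger + EOI), i.e. one slot
 * per 128K of VC space, which bounds the IRQ and END counts above.
 */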
static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}
static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    device_class_set_parent_realize(dc, pnv_xive_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive_properties);

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_pq = pnv_xive_get_pq;
    xrc->set_pq = pnv_xive_set_pq;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
    xpc->get_config = pnv_xive_presenter_get_config;
}
static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .class_size    = sizeof(PnvXiveClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)