/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4   /* 2 PQ state bits per source, so 4 entries/byte */

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled) :
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6 },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}

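/*
 * Worked example: with mask = 0x0000ff00, ctz64(mask) = 8, so
 *
 *   GETFIELD(0x0000ff00, 0x12345678)    == 0x56
 *   SETFIELD(0x0000ff00, 0x12345678, 1) == 0x12340178
 *
 * The mask both locates the field (through its trailing zero count)
 * and bounds the value written into it.
 */
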
/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differ !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

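/*
 * Example of the two-level translation above: with 64K indirect
 * pages and 32-byte ENDs, vst_per_page = 65536 / 32 = 2048. A lookup
 * of idx = 5000 loads the VSD at vsd_addr + 2 * XIVE_VSD_SIZE
 * (vsd_idx = 5000 / 2048 = 2) and then resolves entry
 * 5000 % 2048 = 904 within the direct page it points to.
 */
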
static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * the first cores 0-15 (normal) of the chip or 0-7 (fused). The
 * second register covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}

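/*
 * Example: each 64-bit enable register covers 64 PIR values, i.e. 16
 * normal cores of 4 threads or 8 fused cores of 8 threads. A thread
 * with PIR 0x45 is tracked by bit PPC_BIT(0x45 & 0x3f) = PPC_BIT(5)
 * of the register selected by its fused-core number.
 */
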
static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates, that we
             * don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno));
}

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

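/*
 * The BARM registers hold the negated size mask of their window, so
 * the two's complement above recovers the window size. Example: a
 * BARM value of 0xffffff0000000000 would give ~m + 1 =
 * 0x10000000000, i.e. a 1 TB region.
 */
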
static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        /* No 'idx' in this scope, unlike the other VST accessors */
        xive_error(xive, "VST: invalid %s entry !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}

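/*
 * Example: with 64 sets of 512 MB and an EDT programmed as
 * { IPI, IPI, END, END, ... }, a VC access at 0x60000000 (set 3)
 * falls in an END set. The two preceding IPI sets do not exist in
 * the END address space, so the access is remapped to
 * 0x60000000 - 2 * 0x20000000 = 0x20000000 in the END region.
 */
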
static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

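/*
 * The firmware is expected to program a table by selecting it once
 * in CQ_TAR with CQ_TAR_TBL_AUTOINC set and then issuing one CQ_TDR
 * write per entry; the index auto-increments and the write of the
 * last entry triggers the EDT resize above. A sketch of the
 * sequence, assuming a 64-entry EDT:
 *
 *   CQ_TAR <- CQ_TAR_TSEL_EDT | CQ_TAR_TBL_AUTOINC | index 0
 *   CQ_TDR <- edt[0]
 *   ...
 *   CQ_TDR <- edt[63]
 */
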
/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both PC and VC sub-engines are configured as each uses the Virtual
 * Structure Tables : SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI    0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW     0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS  0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5      0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6      0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7      0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI       0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW        0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC    0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC    0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS     0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL      0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH      0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC       0xf80 /* Sync remove VPC store */

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and for debug purpose also.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

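/*
 * Expected use of the indirect page, as a sketch: the hypervisor
 * first writes PC_TCTXT_INDIR0 with PC_TCTXT_INDIR_VALID and the
 * target thread id, then accesses the TIMA through IC pages 4-7.
 * Each access below resolves the thread context from that register
 * rather than from the CPU performing the access.
 */
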
static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
                   eq_blk, eq_idx,
                   xive_get_field32(NVT_W4_IPB, nvt->w4));
}

void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;

    monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
                   0, XIVE_NVT_COUNT - 1);
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, mon);
        }
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);
}

/*
 *  Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

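/*
 * Each interrupt (and each END) is provisioned a pair of 64K ESB
 * pages, i.e. 2^XIVE_ESB_64K_2PAGE bytes of VC space. For example,
 * assuming a 4 TB VC window, this yields 2^42 / 2^17 = 32M possible
 * IRQ numbers.
 */
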
static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    /* device_class_set_parent_realize() also sets dc->realize */
    device_class_set_parent_realize(dc, pnv_xive_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive_properties);

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .class_size    = sizeof(PnvXiveClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)