/*
 * QEMU PowerPC PowerNV (POWER9) PHB4 model
 *
 * Copyright (c) 2018-2020, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "monitor/monitor.h"
#include "target/ppc/cpu.h"
#include "hw/pci-host/pnv_phb4_regs.h"
#include "hw/pci-host/pnv_phb4.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pcie_port.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"

#define phb_error(phb, fmt, ...)                                        \
    qemu_log_mask(LOG_GUEST_ERROR, "phb4[%d:%d]: " fmt "\n",            \
                  (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__)

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * These are common with the PnvXive model.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}

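/*
 * Illustrative example (not part of the original source): with
 * mask = 0x00ff000000000000ull, ctz64(mask) is 48, so
 * GETFIELD(mask, word) returns bits 48..55 of 'word' shifted down
 * to bit 0, and SETFIELD(mask, word, 0x12) rewrites only that field,
 * leaving all other bits of 'word' untouched.
 */
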
static PCIDevice *pnv_phb4_find_cfg_dev(PnvPHB4 *phb)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb);
    uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3];
    uint8_t bus, devfn;

    if (!(addr >> 63)) {
        return NULL;
    }
    bus = (addr >> 52) & 0xff;
    devfn = (addr >> 44) & 0xff;

    /* We don't access the root complex this way */
    if (bus == 0 && devfn == 0) {
        return NULL;
    }
    return pci_find_device(pci->bus, bus, devfn);
}

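/*
 * Sketch of the PHB_CONFIG_ADDRESS layout as decoded above and in
 * the accessors below: bit 63 validates the cycle, bits 52..59 hold
 * the bus number, bits 44..51 the devfn, and bits 34..43 the aligned
 * register offset; the low two offset bits come from the byte lane
 * of the CONFIG_DATA access itself.
 */
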
/*
 * The CONFIG_DATA register expects little endian accesses, but as the
 * region is big endian, we have to swap the value.
 */
static void pnv_phb4_config_write(PnvPHB4 *phb, unsigned off,
                                  unsigned size, uint64_t val)
{
    uint32_t cfg_addr, limit;
    PCIDevice *pdev;

    pdev = pnv_phb4_find_cfg_dev(phb);
    if (!pdev) {
        return;
    }
    cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
    cfg_addr |= off;
    limit = pci_config_size(pdev);
    if (limit <= cfg_addr) {
        /*
         * conventional pci device can be behind pcie-to-pci bridge.
         * 256 <= addr < 4K has no effects.
         */
        return;
    }
    switch (size) {
    case 1:
        break;
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    default:
        g_assert_not_reached();
    }
    pci_host_config_write_common(pdev, cfg_addr, limit, val, size);
}

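/*
 * Typical guest sequence (sketch): program PHB_CONFIG_ADDRESS with
 * the valid bit and the target bus/devfn/register, then issue a 1-,
 * 2- or 4-byte access to PHB_CONFIG_DATA. The handlers here swap
 * between the big-endian register space and the little-endian PCI
 * config space.
 */
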
static uint64_t pnv_phb4_config_read(PnvPHB4 *phb, unsigned off,
                                     unsigned size)
{
    uint32_t cfg_addr, limit;
    PCIDevice *pdev;
    uint64_t val;

    pdev = pnv_phb4_find_cfg_dev(phb);
    if (!pdev) {
        return ~0ull;
    }
    cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
    cfg_addr |= off;
    limit = pci_config_size(pdev);
    if (limit <= cfg_addr) {
        /*
         * conventional pci device can be behind pcie-to-pci bridge.
         * 256 <= addr < 4K has no effects.
         */
        return ~0ull;
    }
    val = pci_host_config_read_common(pdev, cfg_addr, limit, size);
    switch (size) {
    case 1:
        return val;
    case 2:
        return bswap16(val);
    case 4:
        return bswap32(val);
    default:
        g_assert_not_reached();
    }
}

/*
 * Root complex register accesses are memory mapped.
 */
static void pnv_phb4_rc_config_write(PnvPHB4 *phb, unsigned off,
                                     unsigned size, uint64_t val)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb);
    PCIDevice *pdev;

    if (size != 4) {
        phb_error(phb, "rc_config_write invalid size %d\n", size);
        return;
    }

    pdev = pci_find_device(pci->bus, 0, 0);
    assert(pdev);

    pci_host_config_write_common(pdev, off, PHB_RC_CONFIG_SIZE,
                                 bswap32(val), 4);
}

static uint64_t pnv_phb4_rc_config_read(PnvPHB4 *phb, unsigned off,
                                        unsigned size)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb);
    PCIDevice *pdev;
    uint64_t val;

    if (size != 4) {
        phb_error(phb, "rc_config_read invalid size %d\n", size);
        return ~0ull;
    }

    pdev = pci_find_device(pci->bus, 0, 0);
    assert(pdev);

    val = pci_host_config_read_common(pdev, off, PHB_RC_CONFIG_SIZE, 4);
    return bswap32(val);
}

static void pnv_phb4_check_mbt(PnvPHB4 *phb, uint32_t index)
{
    uint64_t base, start, size, mbe0, mbe1;
    MemoryRegion *parent;
    char name[64];

    /* Unmap first */
    if (memory_region_is_mapped(&phb->mr_mmio[index])) {
        /* Should we destroy it in RCU friendly way... ? */
        memory_region_del_subregion(phb->mr_mmio[index].container,
                                    &phb->mr_mmio[index]);
    }

    /* Get table entry */
    mbe0 = phb->ioda_MBT[(index << 1)];
    mbe1 = phb->ioda_MBT[(index << 1) + 1];

    if (!(mbe0 & IODA3_MBT0_ENABLE)) {
        return;
    }

    /* Grab geometry from registers */
    base = GETFIELD(IODA3_MBT0_BASE_ADDR, mbe0) << 12;
    size = GETFIELD(IODA3_MBT1_MASK, mbe1) << 12;
    size |= 0xff00000000000000ull;
    size = ~size + 1;

    /* Calculate PCI side start address based on M32/M64 window type */
    if (mbe0 & IODA3_MBT0_TYPE_M32) {
        start = phb->regs[PHB_M32_START_ADDR >> 3];
        if ((start + size) > 0x100000000ull) {
            phb_error(phb, "M32 set beyond 4GB boundary !");
            size = 0x100000000 - start;
        }
    } else {
        start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]);
    }

    /* TODO: Figure out how to implement/decode AOMASK */

    /* Check if it matches an enabled MMIO region in the PEC stack */
    if (memory_region_is_mapped(&phb->stack->mmbar0) &&
        base >= phb->stack->mmio0_base &&
        (base + size) <= (phb->stack->mmio0_base + phb->stack->mmio0_size)) {
        parent = &phb->stack->mmbar0;
        base -= phb->stack->mmio0_base;
    } else if (memory_region_is_mapped(&phb->stack->mmbar1) &&
        base >= phb->stack->mmio1_base &&
        (base + size) <= (phb->stack->mmio1_base + phb->stack->mmio1_size)) {
        parent = &phb->stack->mmbar1;
        base -= phb->stack->mmio1_base;
    } else {
        phb_error(phb, "PHB MBAR %d out of parent bounds", index);
        return;
    }

    /* Create alias (better name ?) */
    snprintf(name, sizeof(name), "phb4-mbar%d", index);
    memory_region_init_alias(&phb->mr_mmio[index], OBJECT(phb), name,
                             &phb->pci_mmio, start, size);
    memory_region_add_subregion(parent, base, &phb->mr_mmio[index]);
}

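/*
 * Worked example for the size decode above (illustrative values):
 * if the MBT1 mask field, once shifted left by 12 and OR'ed with
 * 0xff00000000000000ull, yields 0xffffffffff000000ull, then
 * ~size + 1 = 0x1000000, i.e. a 16MB window. The mask is a two's
 * complement encoding of the window length.
 */
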
static void pnv_phb4_check_all_mbt(PnvPHB4 *phb)
{
    uint64_t i;
    uint32_t num_windows = phb->big_phb ? PNV_PHB4_MAX_MMIO_WINDOWS :
        PNV_PHB4_MIN_MMIO_WINDOWS;

    for (i = 0; i < num_windows; i++) {
        pnv_phb4_check_mbt(phb, i);
    }
}

static uint64_t *pnv_phb4_ioda_access(PnvPHB4 *phb,
                                      unsigned *out_table, unsigned *out_idx)
{
    uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3];
    unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg);
    unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg);
    unsigned int mask;
    uint64_t *tptr = NULL;

    switch (table) {
    case IODA3_TBL_LIST:
        tptr = phb->ioda_LIST;
        mask = 7;
        break;
    case IODA3_TBL_MIST:
        tptr = phb->ioda_MIST;
        mask = phb->big_phb ? PNV_PHB4_MAX_MIST : (PNV_PHB4_MAX_MIST >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_RCAM:
        mask = phb->big_phb ? 127 : 63;
        break;
    case IODA3_TBL_MRT:
        mask = phb->big_phb ? 15 : 7;
        break;
    case IODA3_TBL_PESTA:
    case IODA3_TBL_PESTB:
        mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_TVT:
        tptr = phb->ioda_TVT;
        mask = phb->big_phb ? PNV_PHB4_MAX_TVEs : (PNV_PHB4_MAX_TVEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_TCR:
    case IODA3_TBL_TDR:
        mask = phb->big_phb ? 1023 : 511;
        break;
    case IODA3_TBL_MBT:
        tptr = phb->ioda_MBT;
        mask = phb->big_phb ? PNV_PHB4_MAX_MBEs : (PNV_PHB4_MAX_MBEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_MDT:
        tptr = phb->ioda_MDT;
        mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_PEEV:
        tptr = phb->ioda_PEEV;
        mask = phb->big_phb ? PNV_PHB4_MAX_PEEVs : (PNV_PHB4_MAX_PEEVs >> 1);
        mask -= 1;
        break;
    default:
        phb_error(phb, "invalid IODA table %d", table);
        return NULL;
    }
    index &= mask;
    if (out_idx) {
        *out_idx = index;
    }
    if (out_table) {
        *out_table = table;
    }
    if (tptr) {
        tptr += index;
    }
    if (adreg & PHB_IODA_AD_AUTOINC) {
        index = (index + 1) & mask;
        adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index);
    }

    phb->regs[PHB_IODA_ADDR >> 3] = adreg;
    return tptr;
}

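/*
 * Usage sketch: the guest selects a table and starting index by
 * writing the TSEL/TADR fields of PHB_IODA_ADDR, then accesses
 * PHB_IODA_DATA0 repeatedly. With PHB_IODA_AD_AUTOINC set, each
 * access advances TADR by one entry, wrapping at the table size
 * computed above.
 */
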
static uint64_t pnv_phb4_ioda_read(PnvPHB4 *phb)
{
    unsigned table, idx;
    uint64_t *tptr;

    tptr = pnv_phb4_ioda_access(phb, &table, &idx);
    if (!tptr) {
        /* Special PESTA case */
        if (table == IODA3_TBL_PESTA) {
            return ((uint64_t)(phb->ioda_PEST_AB[idx] & 1)) << 63;
        } else if (table == IODA3_TBL_PESTB) {
            return ((uint64_t)(phb->ioda_PEST_AB[idx] & 2)) << 62;
        }
        /* Return 0 on unsupported tables, not ff's */
        return 0;
    }
    return *tptr;
}

static void pnv_phb4_ioda_write(PnvPHB4 *phb, uint64_t val)
{
    unsigned table, idx;
    uint64_t *tptr;

    tptr = pnv_phb4_ioda_access(phb, &table, &idx);
    if (!tptr) {
        /* Special PESTA case */
        if (table == IODA3_TBL_PESTA) {
            phb->ioda_PEST_AB[idx] &= ~1;
            phb->ioda_PEST_AB[idx] |= (val >> 63) & 1;
        } else if (table == IODA3_TBL_PESTB) {
            phb->ioda_PEST_AB[idx] &= ~2;
            phb->ioda_PEST_AB[idx] |= (val >> 62) & 2;
        }
        return;
    }

    /* Handle side effects */
    switch (table) {
    case IODA3_TBL_MIST: {
        /* Special mask for MIST partial write */
        uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3];
        uint32_t mmask = GETFIELD(PHB_IODA_AD_MIST_PWV, adreg);
        uint64_t v = *tptr;
        if (mmask == 0) {
            mmask = 0xf;
        }
        if (mmask & 8) {
            v &= 0x0000ffffffffffffull;
            v |= 0xcfff000000000000ull & val;
        }
        if (mmask & 4) {
            v &= 0xffff0000ffffffffull;
            v |= 0x0000cfff00000000ull & val;
        }
        if (mmask & 2) {
            v &= 0xffffffff0000ffffull;
            v |= 0x00000000cfff0000ull & val;
        }
        if (mmask & 1) {
            v &= 0xffffffffffff0000ull;
            v |= 0x000000000000cfffull & val;
        }
        *tptr = v;
        break;
    }
    case IODA3_TBL_MBT:
        *tptr = val;

        /* Copy across the valid bit to the other half */
        phb->ioda_MBT[idx ^ 1] &= 0x7fffffffffffffffull;
        phb->ioda_MBT[idx ^ 1] |= 0x8000000000000000ull & val;

        /* Update mappings */
        pnv_phb4_check_mbt(phb, idx >> 1);
        break;
    default:
        *tptr = val;
    }
}

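/*
 * Note on the MIST partial write above: each PWV bit guards one
 * 16-bit quarter of the 64-bit entry, and the 0xcfff pattern drops
 * bits 12..13 of each quarter on write (presumably reserved bits).
 */
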
static void pnv_phb4_rtc_invalidate(PnvPHB4 *phb, uint64_t val)
{
    PnvPhb4DMASpace *ds;

    /* Always invalidate all for now ... */
    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        ds->pe_num = PHB_INVALID_PE;
    }
}

static void pnv_phb4_update_msi_regions(PnvPhb4DMASpace *ds)
{
    uint64_t cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3];

    if (cfg & PHB_PHB4C_32BIT_MSI_EN) {
        if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) {
            memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
                                        0xffff0000, &ds->msi32_mr);
        }
    } else {
        if (memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) {
            memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
                                        &ds->msi32_mr);
        }
    }

    if (cfg & PHB_PHB4C_64BIT_MSI_EN) {
        if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) {
            memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
                                        (1ull << 60), &ds->msi64_mr);
        }
    } else {
        if (memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) {
            memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
                                        &ds->msi64_mr);
        }
    }
}

static void pnv_phb4_update_all_msi_regions(PnvPHB4 *phb)
{
    PnvPhb4DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        pnv_phb4_update_msi_regions(ds);
    }
}

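/*
 * Resulting per-device DMA address map (sizes taken from
 * pnv_phb4_dma_iommu below): when enabled, the 32-bit MSI window
 * covers the 64KB at 0xffff0000 and the 64-bit MSI window covers
 * the 1MB at 1ull << 60; all other addresses go through the IOMMU
 * translation path.
 */
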
static void pnv_phb4_update_xsrc(PnvPHB4 *phb)
{
    int shift, flags, i, lsi_base;
    XiveSource *xsrc = &phb->xsrc;

    /* The XIVE source characteristics can be set at run time */
    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PGSZ_64K) {
        shift = XIVE_ESB_64K;
    } else {
        shift = XIVE_ESB_4K;
    }
    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_STORE_EOI) {
        flags = XIVE_SRC_STORE_EOI;
    } else {
        flags = 0;
    }

    phb->xsrc.esb_shift = shift;
    phb->xsrc.esb_flags = flags;

    lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]);
    lsi_base <<= 3;

    /* TODO: handle reset values of PHB_LSI_SRC_ID */
    if (!lsi_base) {
        return;
    }

    /* TODO: need a xive_source_irq_reset_lsi() */
    bitmap_zero(xsrc->lsi_map, xsrc->nr_irqs);

    for (i = 0; i < xsrc->nr_irqs; i++) {
        bool msi = (i < lsi_base || i >= (lsi_base + 8));
        if (!msi) {
            xive_source_irq_set_lsi(xsrc, i);
        }
    }
}

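/*
 * The PHB_LSI_SRC_ID field is in units of 8 interrupts (hence the
 * <<= 3): the 8 sources starting at lsi_base are LSIs, every other
 * source in the block is treated as an MSI.
 */
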
static void pnv_phb4_reg_write(void *opaque, hwaddr off, uint64_t val,
                               unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    bool changed;

    /* Special case outbound configuration data */
    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        pnv_phb4_config_write(phb, off & 0x3, size, val);
        return;
    }

    /* Special case RC configuration space */
    if ((off & 0xf800) == PHB_RC_CONFIG_BASE) {
        pnv_phb4_rc_config_write(phb, off & 0x7ff, size, val);
        return;
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                  off, size);
        return;
    }

    /* Handle masking */
    switch (off) {
    case PHB_LSI_SOURCE_ID:
        val &= PHB_LSI_SRC_ID;
        break;
    case PHB_M64_UPPER_BITS:
        val &= 0xff00000000000000ull;
        break;
    case PHB_TCE_KILL:
        /* Clear top 3 bits which HW does to indicate successful queuing */
        val &= ~(PHB_TCE_KILL_ALL | PHB_TCE_KILL_PE | PHB_TCE_KILL_ONE);
        break;
    case PHB_Q_DMA_R:
        /*
         * This is enough logic to make SW happy but we aren't
         * actually quiescing the DMAs
         */
        if (val & PHB_Q_DMA_R_AUTORESET) {
            val = 0;
        } else {
            val &= PHB_Q_DMA_R_QUIESCE_DMA;
        }
        break;
    /* LEM stuff */
    case PHB_LEM_FIR_AND_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val;
        return;
    case PHB_LEM_FIR_OR_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val;
        return;
    case PHB_LEM_ERROR_AND_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val;
        return;
    case PHB_LEM_ERROR_OR_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val;
        return;
    case PHB_LEM_WOF:
        val = 0;
        break;
    /* TODO: More regs ..., maybe create a table with masks... */

    /* Read only registers */
    case PHB_CPU_LOADSTORE_STATUS:
    case PHB_ETU_ERR_SUMMARY:
    case PHB_PHB4_GEN_CAP:
    case PHB_PHB4_TCE_CAP:
    case PHB_PHB4_IRQ_CAP:
    case PHB_PHB4_EEH_CAP:
        return;
    }

    /* Record whether it changed */
    changed = phb->regs[off >> 3] != val;

    /* Store in register cache first */
    phb->regs[off >> 3] = val;

    /* Handle side effects */
    switch (off) {
    case PHB_PHB4_CONFIG:
        if (changed) {
            pnv_phb4_update_all_msi_regions(phb);
        }
        break;
    case PHB_M32_START_ADDR:
    case PHB_M64_UPPER_BITS:
        if (changed) {
            pnv_phb4_check_all_mbt(phb);
        }
        break;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        pnv_phb4_ioda_write(phb, val);
        break;

    /* RTC invalidation */
    case PHB_RTC_INVALIDATE:
        pnv_phb4_rtc_invalidate(phb, val);
        break;

    /* PHB Control (Affects XIVE source) */
    case PHB_CTRLR:
    case PHB_LSI_SOURCE_ID:
        pnv_phb4_update_xsrc(phb);
        break;

    /* Silent simple writes */
    case PHB_ASN_CMPM:
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
    case PHB_TCE_TAG_ENABLE:
    case PHB_INT_NOTIFY_ADDR:
    case PHB_INT_NOTIFY_INDEX:
    case PHB_DMARD_SYNC:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: reg_write 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
}

static uint64_t pnv_phb4_reg_read(void *opaque, hwaddr off, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint64_t val;

    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        return pnv_phb4_config_read(phb, off & 0x3, size);
    }

    /* Special case RC configuration space */
    if ((off & 0xf800) == PHB_RC_CONFIG_BASE) {
        return pnv_phb4_rc_config_read(phb, off & 0x7ff, size);
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                  off, size);
        return ~0ull;
    }

    /* Default read from cache */
    val = phb->regs[off >> 3];

    switch (off) {
    case PHB_VERSION:
        return phb->version;

        /* Read-only */
    case PHB_PHB4_GEN_CAP:
        return 0xe4b8000000000000ull;
    case PHB_PHB4_TCE_CAP:
        return phb->big_phb ? 0x4008440000000400ull : 0x2008440000000200ull;
    case PHB_PHB4_IRQ_CAP:
        return phb->big_phb ? 0x0800000000001000ull : 0x0800000000000800ull;
    case PHB_PHB4_EEH_CAP:
        return phb->big_phb ? 0x2000000000000000ull : 0x1000000000000000ull;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        return pnv_phb4_ioda_read(phb);

    /* Link training always appears trained */
    case PHB_PCIE_DLP_TRAIN_CTL:
        /* TODO: Do something sensible with speed ? */
        return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TL_LINKACT;

    /* DMA read sync: make it look like it's complete */
    case PHB_DMARD_SYNC:
        return PHB_DMARD_SYNC_COMPLETE;

    /* Silent simple reads */
    case PHB_LSI_SOURCE_ID:
    case PHB_CPU_LOADSTORE_STATUS:
    case PHB_ASN_CMPM:
    case PHB_PHB4_CONFIG:
    case PHB_M32_START_ADDR:
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_RTC_INVALIDATE:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_M64_UPPER_BITS:
    case PHB_CTRLR:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
    case PHB_TCE_TAG_ENABLE:
    case PHB_INT_NOTIFY_ADDR:
    case PHB_INT_NOTIFY_INDEX:
    case PHB_Q_DMA_R:
    case PHB_ETU_ERR_SUMMARY:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: reg_read 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
    return val;
}

static const MemoryRegionOps pnv_phb4_reg_ops = {
    .read = pnv_phb4_reg_read,
    .write = pnv_phb4_reg_write,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};

static uint64_t pnv_phb4_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;
    uint64_t val;
    hwaddr offset;

    switch (reg) {
    case PHB_SCOM_HV_IND_ADDR:
        return phb->scom_hv_ind_addr_reg;

    case PHB_SCOM_HV_IND_DATA:
        if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) {
            phb_error(phb, "Invalid indirect address");
            return ~0ull;
        }
        size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 4 : 8;
        offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg);
        val = pnv_phb4_reg_read(phb, offset, size);
        if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) {
            offset += size;
            offset &= 0x3fff;
            phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR,
                                                 phb->scom_hv_ind_addr_reg,
                                                 offset);
        }
        return val;
    case PHB_SCOM_ETU_LEM_FIR:
    case PHB_SCOM_ETU_LEM_FIR_AND:
    case PHB_SCOM_ETU_LEM_FIR_OR:
    case PHB_SCOM_ETU_LEM_FIR_MSK:
    case PHB_SCOM_ETU_LEM_ERR_MSK_AND:
    case PHB_SCOM_ETU_LEM_ERR_MSK_OR:
    case PHB_SCOM_ETU_LEM_ACT0:
    case PHB_SCOM_ETU_LEM_ACT1:
    case PHB_SCOM_ETU_LEM_WOF:
        offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM;
        return pnv_phb4_reg_read(phb, offset, size);
    case PHB_SCOM_ETU_PMON_CONFIG:
    case PHB_SCOM_ETU_PMON_CTR0:
    case PHB_SCOM_ETU_PMON_CTR1:
    case PHB_SCOM_ETU_PMON_CTR2:
    case PHB_SCOM_ETU_PMON_CTR3:
        offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG;
        return pnv_phb4_reg_read(phb, offset, size);

    default:
        qemu_log_mask(LOG_UNIMP, "phb4: xscom_read 0x%"HWADDR_PRIx"\n", addr);
        return ~0ull;
    }
}

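/*
 * Indirect access sketch: firmware writes PHB_SCOM_HV_IND_ADDR with
 * the VALID bit, the optional 4-byte flag and a register offset,
 * then accesses PHB_SCOM_HV_IND_DATA. With AUTOINC set, the offset
 * advances by the access size (modulo 0x4000), so the whole register
 * file can be streamed over XSCOM.
 */
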
static void pnv_phb4_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;
    hwaddr offset;

    switch (reg) {
    case PHB_SCOM_HV_IND_ADDR:
        phb->scom_hv_ind_addr_reg = val & 0xe000000000001fff;
        break;
    case PHB_SCOM_HV_IND_DATA:
        if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) {
            phb_error(phb, "Invalid indirect address");
            break;
        }
        size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 4 : 8;
        offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg);
        pnv_phb4_reg_write(phb, offset, val, size);
        if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) {
            offset += size;
            offset &= 0x3fff;
            phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR,
                                                 phb->scom_hv_ind_addr_reg,
                                                 offset);
        }
        break;
    case PHB_SCOM_ETU_LEM_FIR:
    case PHB_SCOM_ETU_LEM_FIR_AND:
    case PHB_SCOM_ETU_LEM_FIR_OR:
    case PHB_SCOM_ETU_LEM_FIR_MSK:
    case PHB_SCOM_ETU_LEM_ERR_MSK_AND:
    case PHB_SCOM_ETU_LEM_ERR_MSK_OR:
    case PHB_SCOM_ETU_LEM_ACT0:
    case PHB_SCOM_ETU_LEM_ACT1:
    case PHB_SCOM_ETU_LEM_WOF:
        offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM;
        pnv_phb4_reg_write(phb, offset, val, size);
        break;
    case PHB_SCOM_ETU_PMON_CONFIG:
    case PHB_SCOM_ETU_PMON_CTR0:
    case PHB_SCOM_ETU_PMON_CTR1:
    case PHB_SCOM_ETU_PMON_CTR2:
    case PHB_SCOM_ETU_PMON_CTR3:
        offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG;
        pnv_phb4_reg_write(phb, offset, val, size);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: xscom_write 0x%"HWADDR_PRIx
                      "=%"PRIx64"\n", addr, val);
    }
}

const MemoryRegionOps pnv_phb4_xscom_ops = {
    .read = pnv_phb4_xscom_read,
    .write = pnv_phb4_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};

static int pnv_phb4_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* Check that out properly ... */
    return irq_num % 4;
}

static void pnv_phb4_set_irq(void *opaque, int irq_num, int level)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t lsi_base;

    /* LSI only ... */
    if (irq_num > 3) {
        phb_error(phb, "IRQ %x is not an LSI", irq_num);
    }
    lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]);
    lsi_base <<= 3;
    qemu_set_irq(phb->qirqs[lsi_base + irq_num], level);
}

static bool pnv_phb4_resolve_pe(PnvPhb4DMASpace *ds)
{
    uint64_t rtt, addr;
    uint16_t rte;
    int bus_num;
    int num_PEs;

    /* Already resolved ? */
    if (ds->pe_num != PHB_INVALID_PE) {
        return true;
    }

    /* We need to lookup the RTT */
    rtt = ds->phb->regs[PHB_RTT_BAR >> 3];
    if (!(rtt & PHB_RTT_BAR_ENABLE)) {
        phb_error(ds->phb, "DMA with RTT BAR disabled !");
        /* Set error bits ? fence ? ... */
        return false;
    }

    /* Read RTE */
    bus_num = pci_bus_num(ds->bus);
    addr = rtt & PHB_RTT_BASE_ADDRESS_MASK;
    addr += 2 * ((bus_num << 8) | ds->devfn);
    if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) {
        phb_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr);
        /* Set error bits ? fence ? ... */
        return false;
    }
    rte = be16_to_cpu(rte);

    /* Fail upon reading of invalid PE# */
    num_PEs = ds->phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
    if (rte >= num_PEs) {
        phb_error(ds->phb, "RTE for RID 0x%x invalid (%04x)", ds->devfn, rte);
        rte &= num_PEs - 1;
    }
    ds->pe_num = rte;
    return true;
}

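/*
 * RTT layout assumed above: one big-endian 16-bit PE number per RID,
 * fetched at base + 2 * ((bus << 8) | devfn), i.e. a 128KB table
 * covering the full 16-bit RID space.
 */
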
static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr,
                                   bool is_write, uint64_t tve,
                                   IOMMUTLBEntry *tlb)
{
    uint64_t tta = GETFIELD(IODA3_TVT_TABLE_ADDR, tve);
    int32_t lev = GETFIELD(IODA3_TVT_NUM_LEVELS, tve);
    uint32_t tts = GETFIELD(IODA3_TVT_TCE_TABLE_SIZE, tve);
    uint32_t tps = GETFIELD(IODA3_TVT_IO_PSIZE, tve);

    /* Invalid levels */
    if (lev > 4) {
        phb_error(ds->phb, "Invalid #levels in TVE %d", lev);
        return;
    }

    /* Invalid entry */
    if (!tts) {
        phb_error(ds->phb, "Access to invalid TVE");
        return;
    }

    /* IO Page Size of 0 means untranslated, else use TCEs */
    if (tps == 0) {
        /* TODO: Handle boundaries */

        /* Use 4k pages like q35 ... for now */
        tlb->iova = addr & 0xfffffffffffff000ull;
        tlb->translated_addr = addr & 0x0003fffffffff000ull;
        tlb->addr_mask = 0xfffull;
        tlb->perm = IOMMU_RW;
    } else {
        uint32_t tce_shift, tbl_shift, sh;
        uint64_t base, taddr, tce, tce_mask;

        /* Address bits per bottom level TCE entry */
        tce_shift = tps + 11;

        /* Address bits per table level */
        tbl_shift = tts + 8;

        /* Top level table base address */
        base = tta << 12;

        /* Total shift to first level */
        sh = tbl_shift * lev + tce_shift;

        /* TODO: Limit to support IO page sizes */

        /* TODO: Multi-level untested */
        while ((lev--) >= 0) {
            /* Grab the TCE address */
            taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
            if (dma_memory_read(&address_space_memory, taddr, &tce,
                                sizeof(tce))) {
                phb_error(ds->phb, "Failed to read TCE at 0x%"PRIx64, taddr);
                return;
            }
            tce = be64_to_cpu(tce);

            /* Check permission for indirect TCE */
            if ((lev >= 0) && !(tce & 3)) {
                phb_error(ds->phb, "Invalid indirect TCE at 0x%"PRIx64, taddr);
                phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
                          is_write ? 'W' : 'R', tve);
                phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
                          tta, lev, tts, tps);
                return;
            }
            sh -= tbl_shift;
            base = tce & ~0xfffull;
        }

        /* We exit the loop with TCE being the final TCE */
        tce_mask = ~((1ull << tce_shift) - 1);
        tlb->iova = addr & tce_mask;
        tlb->translated_addr = tce & tce_mask;
        tlb->addr_mask = ~tce_mask;
        tlb->perm = tce & 3;
        if ((is_write && !(tce & 2)) || ((!is_write) && !(tce & 1))) {
            phb_error(ds->phb, "TCE access fault at 0x%"PRIx64, taddr);
            phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
                      is_write ? 'W' : 'R', tve);
            phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
                      tta, lev, tts, tps);
        }
    }
}

static IOMMUTLBEntry pnv_phb4_translate_iommu(IOMMUMemoryRegion *iommu,
                                              hwaddr addr,
                                              IOMMUAccessFlags flag,
                                              int iommu_idx)
{
    PnvPhb4DMASpace *ds = container_of(iommu, PnvPhb4DMASpace, dma_mr);
    int tve_sel;
    uint64_t tve, cfg;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    /* Resolve PE# */
    if (!pnv_phb4_resolve_pe(ds)) {
        phb_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
                  ds->bus, pci_bus_num(ds->bus), ds->devfn);
        return ret;
    }

    /* Check top bits */
    switch (addr >> 60) {
    case 0:
        /* DMA or 32-bit MSI ? */
        cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3];
        if ((cfg & PHB_PHB4C_32BIT_MSI_EN) &&
            ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) {
            phb_error(ds->phb, "xlate on 32-bit MSI region");
            return ret;
        }
        /* Choose TVE XXX Use PHB4 Control Register */
        tve_sel = (addr >> 59) & 1;
        tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel];
        pnv_phb4_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret);
        break;
    case 1:
        phb_error(ds->phb, "xlate on 64-bit MSI region");
        break;
    default:
        phb_error(ds->phb, "xlate on unsupported address 0x%"PRIx64, addr);
    }
    return ret;
}

#define TYPE_PNV_PHB4_IOMMU_MEMORY_REGION "pnv-phb4-iommu-memory-region"
#define PNV_PHB4_IOMMU_MEMORY_REGION(obj) \
    OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_PNV_PHB4_IOMMU_MEMORY_REGION)

static void pnv_phb4_iommu_memory_region_class_init(ObjectClass *klass,
                                                    void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = pnv_phb4_translate_iommu;
}

static const TypeInfo pnv_phb4_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_PNV_PHB4_IOMMU_MEMORY_REGION,
    .class_init = pnv_phb4_iommu_memory_region_class_init,
};

/*
 * MSI/MSIX memory region implementation.
 * The handler handles both MSI and MSIX.
 */
static void pnv_phb4_msi_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    PnvPhb4DMASpace *ds = opaque;
    PnvPHB4 *phb = ds->phb;

    uint32_t src = ((addr >> 4) & 0xffff) | (data & 0x1f);

    /* Resolve PE# */
    if (!pnv_phb4_resolve_pe(ds)) {
        phb_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
                  ds->bus, pci_bus_num(ds->bus), ds->devfn);
        return;
    }

    /* TODO: Check it doesn't collide with LSIs */
    if (src >= phb->xsrc.nr_irqs) {
        phb_error(phb, "MSI %d out of bounds", src);
        return;
    }

    /* TODO: check PE/MSI assignment */

    qemu_irq_pulse(phb->qirqs[src]);
}

/* There is no .read as the read result is undefined by PCI spec */
static uint64_t pnv_phb4_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPhb4DMASpace *ds = opaque;

    phb_error(ds->phb, "Invalid MSI read @ 0x%" HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_phb4_msi_ops = {
    .read = pnv_phb4_msi_read,
    .write = pnv_phb4_msi_write,
    .endianness = DEVICE_LITTLE_ENDIAN
};

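/*
 * MSI decode sketch, from the expression above: the source number is
 * bits 4..19 of the window offset OR'ed with the low 5 bits of the
 * message data, so a device given address base + (N << 4) and data D
 * raises source (N & 0xffff) | (D & 0x1f).
 */
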
static PnvPhb4DMASpace *pnv_phb4_dma_find(PnvPHB4 *phb, PCIBus *bus, int devfn)
{
    PnvPhb4DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        if (ds->bus == bus && ds->devfn == devfn) {
            break;
        }
    }
    return ds;
}

static AddressSpace *pnv_phb4_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    PnvPHB4 *phb = opaque;
    PnvPhb4DMASpace *ds;
    char name[32];

    ds = pnv_phb4_dma_find(phb, bus, devfn);

    if (ds == NULL) {
        ds = g_malloc0(sizeof(PnvPhb4DMASpace));
        ds->bus = bus;
        ds->devfn = devfn;
        ds->pe_num = PHB_INVALID_PE;
        ds->phb = phb;
        snprintf(name, sizeof(name), "phb4-%d.%d-iommu", phb->chip_id,
                 phb->phb_id);
        memory_region_init_iommu(&ds->dma_mr, sizeof(ds->dma_mr),
                                 TYPE_PNV_PHB4_IOMMU_MEMORY_REGION,
                                 OBJECT(phb), name, UINT64_MAX);
        address_space_init(&ds->dma_as, MEMORY_REGION(&ds->dma_mr),
                           name);
        memory_region_init_io(&ds->msi32_mr, OBJECT(phb), &pnv_phb4_msi_ops,
                              ds, "msi32", 0x10000);
        memory_region_init_io(&ds->msi64_mr, OBJECT(phb), &pnv_phb4_msi_ops,
                              ds, "msi64", 0x100000);
        pnv_phb4_update_msi_regions(ds);

        QLIST_INSERT_HEAD(&phb->dma_spaces, ds, list);
    }
    return &ds->dma_as;
}

static void pnv_phb4_instance_init(Object *obj)
{
    PnvPHB4 *phb = PNV_PHB4(obj);

    QLIST_INIT(&phb->dma_spaces);

    /* XIVE interrupt source object */
    object_initialize_child(obj, "source", &phb->xsrc, TYPE_XIVE_SOURCE);

    /* Root Port */
    object_initialize_child(obj, "root", &phb->root, TYPE_PNV_PHB4_ROOT_PORT);

    qdev_prop_set_int32(DEVICE(&phb->root), "addr", PCI_DEVFN(0, 0));
    qdev_prop_set_bit(DEVICE(&phb->root), "multifunction", false);
}

static void pnv_phb4_realize(DeviceState *dev, Error **errp)
{
    PnvPHB4 *phb = PNV_PHB4(dev);
    PCIHostState *pci = PCI_HOST_BRIDGE(dev);
    XiveSource *xsrc = &phb->xsrc;
    Error *local_err = NULL;
    int nr_irqs;
    char name[32];

    /* Set the "big_phb" flag */
    phb->big_phb = phb->phb_id == 0 || phb->phb_id == 3;

    /* Controller Registers */
    snprintf(name, sizeof(name), "phb4-%d.%d-regs", phb->chip_id,
             phb->phb_id);
    memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb4_reg_ops, phb,
                          name, 0x2000);

    /*
     * PHB4 doesn't support IO space. However, qemu gets very upset if
     * we don't have an IO region to anchor IO BARs onto so we just
     * initialize one which we never hook up to anything
     */
    snprintf(name, sizeof(name), "phb4-%d.%d-pci-io", phb->chip_id,
             phb->phb_id);
    memory_region_init(&phb->pci_io, OBJECT(phb), name, 0x10000);

    snprintf(name, sizeof(name), "phb4-%d.%d-pci-mmio", phb->chip_id,
             phb->phb_id);
    memory_region_init(&phb->pci_mmio, OBJECT(phb), name,
                       PCI_MMIO_TOTAL_SIZE);

    pci->bus = pci_register_root_bus(dev, "root-bus",
                                     pnv_phb4_set_irq, pnv_phb4_map_irq, phb,
                                     &phb->pci_mmio, &phb->pci_io,
                                     0, 4, TYPE_PNV_PHB4_ROOT_BUS);
    pci_setup_iommu(pci->bus, pnv_phb4_dma_iommu, phb);

    /* Add a single Root port */
    qdev_prop_set_uint8(DEVICE(&phb->root), "chassis", phb->chip_id);
    qdev_prop_set_uint16(DEVICE(&phb->root), "slot", phb->phb_id);
    qdev_realize(DEVICE(&phb->root), BUS(pci->bus), &error_fatal);

    /* Setup XIVE Source */
    if (phb->big_phb) {
        nr_irqs = PNV_PHB4_MAX_INTs;
    } else {
        nr_irqs = PNV_PHB4_MAX_INTs >> 1;
    }
    object_property_set_int(OBJECT(xsrc), nr_irqs, "nr-irqs", &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(phb), "xive", &error_fatal);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    pnv_phb4_update_xsrc(phb);

    phb->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs);
}

static void pnv_phb4_reset(DeviceState *dev)
{
    PnvPHB4 *phb = PNV_PHB4(dev);
    PCIDevice *root_dev = PCI_DEVICE(&phb->root);

    /*
     * Configure PCI device id at reset using a property.
     */
    pci_config_set_vendor_id(root_dev->config, PCI_VENDOR_ID_IBM);
    pci_config_set_device_id(root_dev->config, phb->device_id);
}

static const char *pnv_phb4_root_bus_path(PCIHostState *host_bridge,
                                          PCIBus *rootbus)
{
    PnvPHB4 *phb = PNV_PHB4(host_bridge);

    snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x",
             phb->chip_id, phb->phb_id);
    return phb->bus_path;
}

static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno)
{
    PnvPHB4 *phb = PNV_PHB4(xf);
    uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3];
    uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];
    uint64_t data = XIVE_TRIGGER_PQ | offset | srcno;
    MemTxResult result;

    address_space_stq_be(&address_space_memory, notif_port, data,
                         MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        phb_error(phb, "trigger failed @%"HWADDR_PRIx"\n", notif_port);
        return;
    }
}

static Property pnv_phb4_properties[] = {
    DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0),
    DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0),
    DEFINE_PROP_UINT64("version", PnvPHB4, version, 0),
    DEFINE_PROP_UINT16("device-id", PnvPHB4, device_id, 0),
    DEFINE_PROP_LINK("stack", PnvPHB4, stack, TYPE_PNV_PHB4_PEC_STACK,
                     PnvPhb4PecStack *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_phb4_class_init(ObjectClass *klass, void *data)
{
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass);

    hc->root_bus_path = pnv_phb4_root_bus_path;
    dc->realize = pnv_phb4_realize;
    device_class_set_props(dc, pnv_phb4_properties);
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->user_creatable = false;
    dc->reset = pnv_phb4_reset;

    xfc->notify = pnv_phb4_xive_notify;
}

static const TypeInfo pnv_phb4_type_info = {
    .name          = TYPE_PNV_PHB4,
    .parent        = TYPE_PCIE_HOST_BRIDGE,
    .instance_init = pnv_phb4_instance_init,
    .instance_size = sizeof(PnvPHB4),
    .class_init    = pnv_phb4_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { },
    },
};

static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    /*
     * PHB4 has only a single root complex. Enforce the limit on the
     * parent bus
     */
    k->max_dev = 1;
}

static const TypeInfo pnv_phb4_root_bus_info = {
    .name = TYPE_PNV_PHB4_ROOT_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pnv_phb4_root_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void pnv_phb4_root_port_reset(DeviceState *dev)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev);
    PCIDevice *d = PCI_DEVICE(dev);
    uint8_t *conf = d->config;

    rpc->parent_reset(dev);

    pci_byte_test_and_set_mask(conf + PCI_IO_BASE,
                               PCI_IO_RANGE_MASK & 0xff);
    pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_set_word(conf + PCI_MEMORY_BASE, 0);
    pci_set_word(conf + PCI_MEMORY_LIMIT, 0xfff0);
    pci_set_word(conf + PCI_PREF_MEMORY_BASE, 0x1);
    pci_set_word(conf + PCI_PREF_MEMORY_LIMIT, 0xfff1);
    pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0x1); /* Hack */
    pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0xffffffff);
}

static void pnv_phb4_root_port_realize(DeviceState *dev, Error **errp)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev);
    Error *local_err = NULL;

    rpc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static void pnv_phb4_root_port_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass);

    dc->desc     = "IBM PHB4 PCIE Root Port";
    dc->user_creatable = false;

    device_class_set_parent_realize(dc, pnv_phb4_root_port_realize,
                                    &rpc->parent_realize);
    device_class_set_parent_reset(dc, pnv_phb4_root_port_reset,
                                  &rpc->parent_reset);

    k->vendor_id = PCI_VENDOR_ID_IBM;
    k->device_id = PNV_PHB4_DEVICE_ID;
    k->revision  = 0;

    rpc->exp_offset = 0x48;
    rpc->aer_offset = 0x100;

    dc->reset = &pnv_phb4_root_port_reset;
}

= {
1386 .name
= TYPE_PNV_PHB4_ROOT_PORT
,
1387 .parent
= TYPE_PCIE_ROOT_PORT
,
1388 .instance_size
= sizeof(PnvPHB4RootPort
),
1389 .class_init
= pnv_phb4_root_port_class_init
,
static void pnv_phb4_register_types(void)
{
    type_register_static(&pnv_phb4_root_bus_info);
    type_register_static(&pnv_phb4_root_port_info);
    type_register_static(&pnv_phb4_type_info);
    type_register_static(&pnv_phb4_iommu_memory_region_info);
}

type_init(pnv_phb4_register_types);

void pnv_phb4_update_regions(PnvPhb4PecStack *stack)
{
    PnvPHB4 *phb = &stack->phb;

    /* Unmap first always */
    if (memory_region_is_mapped(&phb->mr_regs)) {
        memory_region_del_subregion(&stack->phbbar, &phb->mr_regs);
    }
    if (memory_region_is_mapped(&phb->xsrc.esb_mmio)) {
        memory_region_del_subregion(&stack->intbar, &phb->xsrc.esb_mmio);
    }

    /* Map registers if enabled */
    if (memory_region_is_mapped(&stack->phbbar)) {
        memory_region_add_subregion(&stack->phbbar, 0, &phb->mr_regs);
    }

    /* Map ESB if enabled */
    if (memory_region_is_mapped(&stack->intbar)) {
        memory_region_add_subregion(&stack->intbar, 0, &phb->xsrc.esb_mmio);
    }

    /* Check/update m32 */
    pnv_phb4_check_all_mbt(phb);
}

void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon)
{
    uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];

    monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x\n",
                   phb->chip_id, phb->phb_id,
                   offset, offset + phb->xsrc.nr_irqs - 1);
    xive_source_pic_print_info(&phb->xsrc, 0, mon);
}