/*
 * QEMU PowerPC PowerNV (POWER9) PHB4 model
 *
 * Copyright (c) 2018-2020, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "monitor/monitor.h"
#include "target/ppc/cpu.h"
#include "hw/pci-host/pnv_phb4_regs.h"
#include "hw/pci-host/pnv_phb4.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pcie_port.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "qom/object.h"
#include "trace.h"
#define phb_error(phb, fmt, ...)                                        \
    qemu_log_mask(LOG_GUEST_ERROR, "phb4[%d:%d]: " fmt "\n",            \
                  (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__)
/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * These are common with the PnvXive model.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
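/*
 * Worked example (illustrative, not from the original source): with a
 * contiguous mask such as 0x00ff0000ull, ctz64(mask) is 16, so
 * GETFIELD(mask, 0x12345678ull) yields 0x34 and
 * SETFIELD(mask, 0x12345678ull, 0xab) yields 0x12ab5678ull.
 */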
static PCIDevice *pnv_phb4_find_cfg_dev(PnvPHB4 *phb)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb);
    uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3];
    uint8_t bus, devfn;

    if (!(addr >> 63)) {
        return NULL;
    }
    bus = (addr >> 52) & 0xff;
    devfn = (addr >> 44) & 0xff;

    /* We don't access the root complex this way */
    if (bus == 0 && devfn == 0) {
        return NULL;
    }
    return pci_find_device(pci->bus, bus, devfn);
}
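/*
 * As decoded above, PHB_CONFIG_ADDRESS carries the bus number in bits
 * 59:52 and the devfn in bits 51:44; bits 43:32 hold the 4-byte
 * aligned config space offset used by the CONFIG_DATA accessors below.
 */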
/*
 * The CONFIG_DATA register expects little endian accesses, but as the
 * region is big endian, we have to swap the value.
 */
static void pnv_phb4_config_write(PnvPHB4 *phb, unsigned off,
                                  unsigned size, uint64_t val)
{
    uint32_t cfg_addr, limit;
    PCIDevice *pdev;

    pdev = pnv_phb4_find_cfg_dev(phb);
    if (!pdev) {
        return;
    }
    cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
    cfg_addr |= off;
    limit = pci_config_size(pdev);
    if (limit <= cfg_addr) {
        /*
         * conventional pci device can be behind pcie-to-pci bridge.
         * 256 <= addr < 4K has no effects.
         */
        return;
    }
    switch (size) {
    case 1:
        break;
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    default:
        g_assert_not_reached();
    }
    pci_host_config_write_common(pdev, cfg_addr, limit, val, size);
}
static uint64_t pnv_phb4_config_read(PnvPHB4 *phb, unsigned off,
                                     unsigned size)
{
    uint32_t cfg_addr, limit;
    PCIDevice *pdev;
    uint64_t val;

    pdev = pnv_phb4_find_cfg_dev(phb);
    if (!pdev) {
        return ~0ull;
    }
    cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
    cfg_addr |= off;
    limit = pci_config_size(pdev);
    if (limit <= cfg_addr) {
        /*
         * conventional pci device can be behind pcie-to-pci bridge.
         * 256 <= addr < 4K has no effects.
         */
        return ~0ull;
    }
    val = pci_host_config_read_common(pdev, cfg_addr, limit, size);
    switch (size) {
    case 1:
        return val;
    case 2:
        return bswap16(val);
    case 4:
        return bswap32(val);
    default:
        g_assert_not_reached();
    }
}
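/*
 * Example: a 4-byte access goes through this big-endian register
 * region, so a register holding 0x12345678 in little-endian config
 * space comes back as bswap32(0x12345678) = 0x78563412, and a
 * little-endian accessor on the CPU side recovers the original value.
 */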
/*
 * Root complex register accesses are memory mapped.
 */
static void pnv_phb4_rc_config_write(PnvPHB4 *phb, unsigned off,
                                     unsigned size, uint64_t val)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb);
    PCIDevice *pdev;

    if (size != 4) {
        phb_error(phb, "rc_config_write invalid size %d\n", size);
        return;
    }

    pdev = pci_find_device(pci->bus, 0, 0);
    assert(pdev);

    pci_host_config_write_common(pdev, off, PHB_RC_CONFIG_SIZE,
                                 bswap32(val), 4);
}
static uint64_t pnv_phb4_rc_config_read(PnvPHB4 *phb, unsigned off,
                                        unsigned size)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb);
    PCIDevice *pdev;
    uint64_t val;

    if (size != 4) {
        phb_error(phb, "rc_config_read invalid size %d\n", size);
        return ~0ull;
    }

    pdev = pci_find_device(pci->bus, 0, 0);
    assert(pdev);

    val = pci_host_config_read_common(pdev, off, PHB_RC_CONFIG_SIZE, 4);
    return bswap32(val);
}
static void pnv_phb4_check_mbt(PnvPHB4 *phb, uint32_t index)
{
    uint64_t base, start, size, mbe0, mbe1;
    MemoryRegion *parent;
    char name[64];

    /* Unmap first */
    if (memory_region_is_mapped(&phb->mr_mmio[index])) {
        /* Should we destroy it in RCU friendly way... ? */
        memory_region_del_subregion(phb->mr_mmio[index].container,
                                    &phb->mr_mmio[index]);
    }

    /* Get table entry */
    mbe0 = phb->ioda_MBT[(index << 1)];
    mbe1 = phb->ioda_MBT[(index << 1) + 1];

    if (!(mbe0 & IODA3_MBT0_ENABLE)) {
        return;
    }

    /* Grab geometry from registers */
    base = GETFIELD(IODA3_MBT0_BASE_ADDR, mbe0) << 12;
    size = GETFIELD(IODA3_MBT1_MASK, mbe1) << 12;
    size |= 0xff00000000000000ull;
    size = ~size + 1;

    /* Calculate PCI side start address based on M32/M64 window type */
    if (mbe0 & IODA3_MBT0_TYPE_M32) {
        start = phb->regs[PHB_M32_START_ADDR >> 3];
        if ((start + size) > 0x100000000ull) {
            phb_error(phb, "M32 set beyond 4GB boundary !");
            size = 0x100000000 - start;
        }
    } else {
        start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]);
    }

    /* TODO: Figure out how to implement/decode AOMASK */

    /* Check if it matches an enabled MMIO region in the PEC stack */
    if (memory_region_is_mapped(&phb->stack->mmbar0) &&
        base >= phb->stack->mmio0_base &&
        (base + size) <= (phb->stack->mmio0_base + phb->stack->mmio0_size)) {
        parent = &phb->stack->mmbar0;
        base -= phb->stack->mmio0_base;
    } else if (memory_region_is_mapped(&phb->stack->mmbar1) &&
               base >= phb->stack->mmio1_base &&
               (base + size) <= (phb->stack->mmio1_base +
                                 phb->stack->mmio1_size)) {
        parent = &phb->stack->mmbar1;
        base -= phb->stack->mmio1_base;
    } else {
        phb_error(phb, "PHB MBAR %d out of parent bounds", index);
        return;
    }

    /* Create alias (better name ?) */
    snprintf(name, sizeof(name), "phb4-mbar%d", index);
    memory_region_init_alias(&phb->mr_mmio[index], OBJECT(phb), name,
                             &phb->pci_mmio, start, size);
    memory_region_add_subregion(parent, base, &phb->mr_mmio[index]);
}
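/*
 * Note on the geometry above: IODA3_MBT1_MASK encodes the window as a
 * mask of significant address bits. Folding in the top 0xff byte and
 * negating (~size + 1) converts the mask into a byte size, e.g. a
 * mask with the low 24 bits clear yields a 16MB window.
 */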
static void pnv_phb4_check_all_mbt(PnvPHB4 *phb)
{
    uint64_t i;
    uint32_t num_windows = phb->big_phb ? PNV_PHB4_MAX_MMIO_WINDOWS :
        PNV_PHB4_MIN_MMIO_WINDOWS;

    for (i = 0; i < num_windows; i++) {
        pnv_phb4_check_mbt(phb, i);
    }
}
static uint64_t *pnv_phb4_ioda_access(PnvPHB4 *phb,
                                      unsigned *out_table, unsigned *out_idx)
{
    uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3];
    unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg);
    unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg);
    unsigned int mask;
    uint64_t *tptr = NULL;

    switch (table) {
    case IODA3_TBL_LIST:
        tptr = phb->ioda_LIST;
        mask = 7;
        break;
    case IODA3_TBL_MIST:
        tptr = phb->ioda_MIST;
        mask = phb->big_phb ? PNV_PHB4_MAX_MIST : (PNV_PHB4_MAX_MIST >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_RCAM:
        mask = phb->big_phb ? 127 : 63;
        break;
    case IODA3_TBL_MRT:
        mask = phb->big_phb ? 15 : 7;
        break;
    case IODA3_TBL_PESTA:
    case IODA3_TBL_PESTB:
        mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_TVT:
        tptr = phb->ioda_TVT;
        mask = phb->big_phb ? PNV_PHB4_MAX_TVEs : (PNV_PHB4_MAX_TVEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_TCR:
    case IODA3_TBL_TDR:
        mask = phb->big_phb ? 1023 : 511;
        break;
    case IODA3_TBL_MBT:
        tptr = phb->ioda_MBT;
        mask = phb->big_phb ? PNV_PHB4_MAX_MBEs : (PNV_PHB4_MAX_MBEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_MDT:
        tptr = phb->ioda_MDT;
        mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_PEEV:
        tptr = phb->ioda_PEEV;
        mask = phb->big_phb ? PNV_PHB4_MAX_PEEVs : (PNV_PHB4_MAX_PEEVs >> 1);
        mask -= 1;
        break;
    default:
        phb_error(phb, "invalid IODA table %d", table);
        return NULL;
    }
    index &= mask;
    if (out_idx) {
        *out_idx = index;
    }
    if (out_table) {
        *out_table = table;
    }
    if (tptr) {
        tptr += index;
    }
    if (adreg & PHB_IODA_AD_AUTOINC) {
        index = (index + 1) & mask;
        adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index);
    }

    phb->regs[PHB_IODA_ADDR >> 3] = adreg;
    return tptr;
}
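/*
 * With PHB_IODA_AD_AUTOINC set, TADR advances modulo the table size on
 * each access, so firmware can stream an entire IODA table through the
 * data register after a single address setup.
 */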
static uint64_t pnv_phb4_ioda_read(PnvPHB4 *phb)
{
    unsigned table, idx;
    uint64_t *tptr;

    tptr = pnv_phb4_ioda_access(phb, &table, &idx);
    if (!tptr) {
        /* Special PESTA case */
        if (table == IODA3_TBL_PESTA) {
            return ((uint64_t)(phb->ioda_PEST_AB[idx] & 1)) << 63;
        } else if (table == IODA3_TBL_PESTB) {
            return ((uint64_t)(phb->ioda_PEST_AB[idx] & 2)) << 62;
        }
        /* Return 0 on unsupported tables, not ff's */
        return 0;
    }
    return *tptr;
}
static void pnv_phb4_ioda_write(PnvPHB4 *phb, uint64_t val)
{
    unsigned table, idx;
    uint64_t *tptr;

    tptr = pnv_phb4_ioda_access(phb, &table, &idx);
    if (!tptr) {
        /* Special PESTA case */
        if (table == IODA3_TBL_PESTA) {
            phb->ioda_PEST_AB[idx] &= ~1;
            phb->ioda_PEST_AB[idx] |= (val >> 63) & 1;
        } else if (table == IODA3_TBL_PESTB) {
            phb->ioda_PEST_AB[idx] &= ~2;
            phb->ioda_PEST_AB[idx] |= (val >> 62) & 2;
        }
        return;
    }

    /* Handle side effects */
    switch (table) {
    case IODA3_TBL_MIST: {
        /* Special mask for MIST partial write */
        uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3];
        uint32_t mmask = GETFIELD(PHB_IODA_AD_MIST_PWV, adreg);
        uint64_t v = *tptr;

        if (mmask == 0) {
            mmask = 0xf;
        }
        if (mmask & 8) {
            v &= 0x0000ffffffffffffull;
            v |= 0xcfff000000000000ull & val;
        }
        if (mmask & 4) {
            v &= 0xffff0000ffffffffull;
            v |= 0x0000cfff00000000ull & val;
        }
        if (mmask & 2) {
            v &= 0xffffffff0000ffffull;
            v |= 0x00000000cfff0000ull & val;
        }
        if (mmask & 1) {
            v &= 0xffffffffffff0000ull;
            v |= 0x000000000000cfffull & val;
        }
        *tptr = v;
        break;
    }
    case IODA3_TBL_MBT:
        *tptr = val;

        /* Copy across the valid bit to the other half */
        phb->ioda_MBT[idx ^ 1] &= 0x7fffffffffffffffull;
        phb->ioda_MBT[idx ^ 1] |= 0x8000000000000000ull & val;

        /* Update mappings */
        pnv_phb4_check_mbt(phb, idx >> 1);
        break;
    default:
        *tptr = val;
    }
}
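/*
 * Example for the MIST partial write above: PWV = 0b1001 updates only
 * the top and bottom 16-bit quarters of the entry, and the 0xcfff
 * pattern within each quarter leaves bits 13:12 untouched.
 */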
static void pnv_phb4_rtc_invalidate(PnvPHB4 *phb, uint64_t val)
{
    PnvPhb4DMASpace *ds;

    /* Always invalidate all for now ... */
    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        ds->pe_num = PHB_INVALID_PE;
    }
}
static void pnv_phb4_update_msi_regions(PnvPhb4DMASpace *ds)
{
    uint64_t cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3];

    if (cfg & PHB_PHB4C_32BIT_MSI_EN) {
        if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) {
            memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
                                        0xffff0000, &ds->msi32_mr);
        }
    } else {
        if (memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) {
            memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
                                        &ds->msi32_mr);
        }
    }

    if (cfg & PHB_PHB4C_64BIT_MSI_EN) {
        if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) {
            memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
                                        (1ull << 60), &ds->msi64_mr);
        }
    } else {
        if (memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) {
            memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
                                        &ds->msi64_mr);
        }
    }
}
static void pnv_phb4_update_all_msi_regions(PnvPHB4 *phb)
{
    PnvPhb4DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        pnv_phb4_update_msi_regions(ds);
    }
}
static void pnv_phb4_update_xsrc(PnvPHB4 *phb)
{
    int shift, flags, i, lsi_base;
    XiveSource *xsrc = &phb->xsrc;

    /* The XIVE source characteristics can be set at run time */
    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PGSZ_64K) {
        shift = XIVE_ESB_64K;
    } else {
        shift = XIVE_ESB_4K;
    }
    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_STORE_EOI) {
        flags = XIVE_SRC_STORE_EOI;
    } else {
        flags = 0;
    }

    phb->xsrc.esb_shift = shift;
    phb->xsrc.esb_flags = flags;

    lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]);
    lsi_base <<= 3;

    /* TODO: handle reset values of PHB_LSI_SRC_ID */
    if (!lsi_base) {
        return;
    }

    /* TODO: need a xive_source_irq_reset_lsi() */
    bitmap_zero(xsrc->lsi_map, xsrc->nr_irqs);

    for (i = 0; i < xsrc->nr_irqs; i++) {
        bool msi = (i < lsi_base || i >= (lsi_base + 8));
        if (!msi) {
            xive_source_irq_set_lsi(xsrc, i);
        }
    }
}
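/*
 * The eight LSIs thus occupy sources [lsi_base, lsi_base + 8); every
 * other source number in the XIVE block is treated as an MSI.
 */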
static void pnv_phb4_reg_write(void *opaque, hwaddr off, uint64_t val,
                               unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    bool changed;

    /* Special case outbound configuration data */
    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        pnv_phb4_config_write(phb, off & 0x3, size, val);
        return;
    }

    /* Special case RC configuration space */
    if ((off & 0xf800) == PHB_RC_CONFIG_BASE) {
        pnv_phb4_rc_config_write(phb, off & 0x7ff, size, val);
        return;
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                  off, size);
        return;
    }

    /* Handle masking */
    switch (off) {
    case PHB_LSI_SOURCE_ID:
        val &= PHB_LSI_SRC_ID;
        break;
    case PHB_M64_UPPER_BITS:
        val &= 0xff00000000000000ull;
        break;
    case PHB_TCE_KILL:
        /* Clear top 3 bits which HW does to indicate successful queuing */
        val &= ~(PHB_TCE_KILL_ALL | PHB_TCE_KILL_PE | PHB_TCE_KILL_ONE);
        break;
    case PHB_Q_DMA_R:
        /*
         * This is enough logic to make SW happy but we aren't
         * actually quiescing the DMAs
         */
        if (val & PHB_Q_DMA_R_AUTORESET) {
            val = 0;
        } else {
            val &= PHB_Q_DMA_R_QUIESCE_DMA;
        }
        break;
    /* LEM stuff */
    case PHB_LEM_FIR_AND_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val;
        return;
    case PHB_LEM_FIR_OR_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val;
        return;
    case PHB_LEM_ERROR_AND_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val;
        return;
    case PHB_LEM_ERROR_OR_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val;
        return;
    case PHB_LEM_WOF:
        val = 0;
        break;
    /* TODO: More regs ..., maybe create a table with masks... */

    /* Read only registers */
    case PHB_CPU_LOADSTORE_STATUS:
    case PHB_ETU_ERR_SUMMARY:
    case PHB_PHB4_GEN_CAP:
    case PHB_PHB4_TCE_CAP:
    case PHB_PHB4_IRQ_CAP:
    case PHB_PHB4_EEH_CAP:
        return;
    }

    /* Record whether it changed */
    changed = phb->regs[off >> 3] != val;

    /* Store in register cache first */
    phb->regs[off >> 3] = val;

    /* Handle side effects */
    switch (off) {
    case PHB_PHB4_CONFIG:
        if (changed) {
            pnv_phb4_update_all_msi_regions(phb);
        }
        break;
    case PHB_M32_START_ADDR:
    case PHB_M64_UPPER_BITS:
        if (changed) {
            pnv_phb4_check_all_mbt(phb);
        }
        break;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        pnv_phb4_ioda_write(phb, val);
        break;

    /* RTC invalidation */
    case PHB_RTC_INVALIDATE:
        pnv_phb4_rtc_invalidate(phb, val);
        break;

    /* PHB Control (Affects XIVE source) */
    case PHB_CTRLR:
    case PHB_LSI_SOURCE_ID:
        pnv_phb4_update_xsrc(phb);
        break;

    /* Silent simple writes */
    case PHB_ASN_CMPM:
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
    case PHB_TCE_TAG_ENABLE:
    case PHB_INT_NOTIFY_ADDR:
    case PHB_INT_NOTIFY_INDEX:
    case PHB_DMARD_SYNC:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: reg_write 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
}
static uint64_t pnv_phb4_reg_read(void *opaque, hwaddr off, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint64_t val;

    /* Special case outbound configuration data */
    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        return pnv_phb4_config_read(phb, off & 0x3, size);
    }

    /* Special case RC configuration space */
    if ((off & 0xf800) == PHB_RC_CONFIG_BASE) {
        return pnv_phb4_rc_config_read(phb, off & 0x7ff, size);
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                  off, size);
        return ~0ull;
    }

    /* Default read from cache */
    val = phb->regs[off >> 3];

    switch (off) {
    case PHB_VERSION:
        return phb->version;

        /* Read-only */
    case PHB_PHB4_GEN_CAP:
        return 0xe4b8000000000000ull;
    case PHB_PHB4_TCE_CAP:
        return phb->big_phb ? 0x4008440000000400ull : 0x2008440000000200ull;
    case PHB_PHB4_IRQ_CAP:
        return phb->big_phb ? 0x0800000000001000ull : 0x0800000000000800ull;
    case PHB_PHB4_EEH_CAP:
        return phb->big_phb ? 0x2000000000000000ull : 0x1000000000000000ull;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        return pnv_phb4_ioda_read(phb);

    /* Link training always appears trained */
    case PHB_PCIE_DLP_TRAIN_CTL:
        /* TODO: Do something sensible with speed ? */
        return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TL_LINKACT;

    /* DMA read sync: make it look like it's complete */
    case PHB_DMARD_SYNC:
        return PHB_DMARD_SYNC_COMPLETE;

    /* Silent simple reads */
    case PHB_LSI_SOURCE_ID:
    case PHB_CPU_LOADSTORE_STATUS:
    case PHB_ASN_CMPM:
    case PHB_PHB4_CONFIG:
    case PHB_M32_START_ADDR:
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_RTC_INVALIDATE:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_M64_UPPER_BITS:
    case PHB_CTRLR:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
    case PHB_TCE_TAG_ENABLE:
    case PHB_INT_NOTIFY_ADDR:
    case PHB_INT_NOTIFY_INDEX:
    case PHB_Q_DMA_R:
    case PHB_ETU_ERR_SUMMARY:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: reg_read 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
    return val;
}
static const MemoryRegionOps pnv_phb4_reg_ops = {
    .read = pnv_phb4_reg_read,
    .write = pnv_phb4_reg_write,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
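/*
 * Indirect register access via XSCOM: firmware first stores the target
 * PHB register offset (plus the VALID and optional 4B/AUTOINC flags)
 * to PHB_SCOM_HV_IND_ADDR, then moves data through
 * PHB_SCOM_HV_IND_DATA. With AUTOINC, the offset advances by the
 * access size on each use.
 */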
static uint64_t pnv_phb4_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;
    uint64_t val;
    hwaddr offset;

    switch (reg) {
    case PHB_SCOM_HV_IND_ADDR:
        return phb->scom_hv_ind_addr_reg;

    case PHB_SCOM_HV_IND_DATA:
        if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) {
            phb_error(phb, "Invalid indirect address");
            return ~0ull;
        }
        size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 4 : 8;
        offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg);
        val = pnv_phb4_reg_read(phb, offset, size);
        if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) {
            offset += size;
            offset &= 0x3fff;
            phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR,
                                                 phb->scom_hv_ind_addr_reg,
                                                 offset);
        }
        return val;
    case PHB_SCOM_ETU_LEM_FIR:
    case PHB_SCOM_ETU_LEM_FIR_AND:
    case PHB_SCOM_ETU_LEM_FIR_OR:
    case PHB_SCOM_ETU_LEM_FIR_MSK:
    case PHB_SCOM_ETU_LEM_ERR_MSK_AND:
    case PHB_SCOM_ETU_LEM_ERR_MSK_OR:
    case PHB_SCOM_ETU_LEM_ACT0:
    case PHB_SCOM_ETU_LEM_ACT1:
    case PHB_SCOM_ETU_LEM_WOF:
        offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM;
        return pnv_phb4_reg_read(phb, offset, size);
    case PHB_SCOM_ETU_PMON_CONFIG:
    case PHB_SCOM_ETU_PMON_CTR0:
    case PHB_SCOM_ETU_PMON_CTR1:
    case PHB_SCOM_ETU_PMON_CTR2:
    case PHB_SCOM_ETU_PMON_CTR3:
        offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG;
        return pnv_phb4_reg_read(phb, offset, size);

    default:
        qemu_log_mask(LOG_UNIMP, "phb4: xscom_read 0x%"HWADDR_PRIx"\n", addr);
        return ~0ull;
    }
}
static void pnv_phb4_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;
    hwaddr offset;

    switch (reg) {
    case PHB_SCOM_HV_IND_ADDR:
        phb->scom_hv_ind_addr_reg = val & 0xe000000000001fff;
        break;
    case PHB_SCOM_HV_IND_DATA:
        if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) {
            phb_error(phb, "Invalid indirect address");
            break;
        }
        size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 4 : 8;
        offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg);
        pnv_phb4_reg_write(phb, offset, val, size);
        if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) {
            offset += size;
            offset &= 0x3fff;
            phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR,
                                                 phb->scom_hv_ind_addr_reg,
                                                 offset);
        }
        break;
    case PHB_SCOM_ETU_LEM_FIR:
    case PHB_SCOM_ETU_LEM_FIR_AND:
    case PHB_SCOM_ETU_LEM_FIR_OR:
    case PHB_SCOM_ETU_LEM_FIR_MSK:
    case PHB_SCOM_ETU_LEM_ERR_MSK_AND:
    case PHB_SCOM_ETU_LEM_ERR_MSK_OR:
    case PHB_SCOM_ETU_LEM_ACT0:
    case PHB_SCOM_ETU_LEM_ACT1:
    case PHB_SCOM_ETU_LEM_WOF:
        offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM;
        pnv_phb4_reg_write(phb, offset, val, size);
        break;
    case PHB_SCOM_ETU_PMON_CONFIG:
    case PHB_SCOM_ETU_PMON_CTR0:
    case PHB_SCOM_ETU_PMON_CTR1:
    case PHB_SCOM_ETU_PMON_CTR2:
    case PHB_SCOM_ETU_PMON_CTR3:
        offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG;
        pnv_phb4_reg_write(phb, offset, val, size);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: xscom_write 0x%"HWADDR_PRIx
                      "=%"PRIx64"\n", addr, val);
    }
}
const MemoryRegionOps pnv_phb4_xscom_ops = {
    .read = pnv_phb4_xscom_read,
    .write = pnv_phb4_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
static int pnv_phb4_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* Check that out properly ... */
    return irq_num % 4;
}
static void pnv_phb4_set_irq(void *opaque, int irq_num, int level)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t lsi_base;

    /* LSI only ... */
    if (irq_num > 3) {
        phb_error(phb, "IRQ %x is not an LSI", irq_num);
    }
    lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]);
    lsi_base <<= 3;
    qemu_set_irq(phb->qirqs[lsi_base + irq_num], level);
}

static bool pnv_phb4_resolve_pe(PnvPhb4DMASpace *ds)
{
    uint64_t rtt, addr;
    uint16_t rte;
    int bus_num;
    int num_PEs;

    /* Already resolved ? */
    if (ds->pe_num != PHB_INVALID_PE) {
        return true;
    }

    /* We need to lookup the RTT */
    rtt = ds->phb->regs[PHB_RTT_BAR >> 3];
    if (!(rtt & PHB_RTT_BAR_ENABLE)) {
        phb_error(ds->phb, "DMA with RTT BAR disabled !");
        /* Set error bits ? fence ? ... */
        return false;
    }

    /* Read RTE */
    bus_num = pci_bus_num(ds->bus);
    addr = rtt & PHB_RTT_BASE_ADDRESS_MASK;
    addr += 2 * PCI_BUILD_BDF(bus_num, ds->devfn);
    if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) {
        phb_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr);
        /* Set error bits ? fence ? ... */
        return false;
    }
    rte = be16_to_cpu(rte);

    /* Fail upon reading of invalid PE# */
    num_PEs = ds->phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
    if (rte >= num_PEs) {
        phb_error(ds->phb, "RTE for RID 0x%x invalid (%04x)", ds->devfn, rte);
        rte &= num_PEs - 1;
    }
    ds->pe_num = rte;
    return true;
}

static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr,
                                   bool is_write, uint64_t tve,
                                   IOMMUTLBEntry *tlb)
{
    uint64_t tta = GETFIELD(IODA3_TVT_TABLE_ADDR, tve);
    int32_t lev = GETFIELD(IODA3_TVT_NUM_LEVELS, tve);
    uint32_t tts = GETFIELD(IODA3_TVT_TCE_TABLE_SIZE, tve);
    uint32_t tps = GETFIELD(IODA3_TVT_IO_PSIZE, tve);

    /* Invalid levels */
    if (lev > 4) {
        phb_error(ds->phb, "Invalid #levels in TVE %d", lev);
        return;
    }

    /* Invalid entry */
    if (!tts) {
        phb_error(ds->phb, "Access to invalid TVE");
        return;
    }

    /* IO Page Size of 0 means untranslated, else use TCEs */
    if (tps == 0) {
        /* TODO: Handle boundaries */

        /* Use 4k pages like q35 ... for now */
        tlb->iova = addr & 0xfffffffffffff000ull;
        tlb->translated_addr = addr & 0x0003fffffffff000ull;
        tlb->addr_mask = 0xfffull;
        tlb->perm = IOMMU_RW;
    } else {
        uint32_t tce_shift, tbl_shift, sh;
        uint64_t base, taddr, tce, tce_mask;

        /* Address bits per bottom level TCE entry */
        tce_shift = tps + 11;

        /* Address bits per table level */
        tbl_shift = tts + 8;

        /* Top level table base address */
        base = tta << 12;

        /* Total shift to first level */
        sh = tbl_shift * lev + tce_shift;

        /* TODO: Limit to support IO page sizes */

        /* TODO: Multi-level untested */
        while ((lev--) >= 0) {
            /* Grab the TCE address */
            taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
            if (dma_memory_read(&address_space_memory, taddr, &tce,
                                sizeof(tce))) {
                phb_error(ds->phb, "Failed to read TCE at 0x%"PRIx64, taddr);
                return;
            }
            tce = be64_to_cpu(tce);

            /* Check permission for indirect TCE */
            if ((lev >= 0) && !(tce & 3)) {
                phb_error(ds->phb, "Invalid indirect TCE at 0x%"PRIx64, taddr);
                phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
                          is_write ? 'W' : 'R', tve);
                phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
                          tta, lev, tts, tps);
                return;
            }
            sh -= tbl_shift;
            base = tce & ~0xfffull;
        }

        /* We exit the loop with TCE being the final TCE */
        tce_mask = ~((1ull << tce_shift) - 1);
        tlb->iova = addr & tce_mask;
        tlb->translated_addr = tce & tce_mask;
        tlb->addr_mask = ~tce_mask;
        tlb->perm = tce & 3;
        if ((is_write && !(tce & 2)) || (!is_write && !(tce & 1))) {
            phb_error(ds->phb, "TCE access fault at 0x%"PRIx64, taddr);
            phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
                      is_write ? 'W' : 'R', tve);
            phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
                      tta, lev, tts, tps);
        }
    }
}

static IOMMUTLBEntry pnv_phb4_translate_iommu(IOMMUMemoryRegion *iommu,
                                              hwaddr addr,
                                              IOMMUAccessFlags flag,
                                              int iommu_idx)
{
    PnvPhb4DMASpace *ds = container_of(iommu, PnvPhb4DMASpace, dma_mr);
    int tve_sel;
    uint64_t tve, cfg;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    /* Resolve PE# */
    if (!pnv_phb4_resolve_pe(ds)) {
        phb_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
                  ds->bus, pci_bus_num(ds->bus), ds->devfn);
        return ret;
    }

    /* Check top bits */
    switch (addr >> 60) {
    case 0:
        /* DMA or 32-bit MSI ? */
        cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3];
        if ((cfg & PHB_PHB4C_32BIT_MSI_EN) &&
            ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) {
            phb_error(ds->phb, "xlate on 32-bit MSI region");
            return ret;
        }
        /* Choose TVE XXX Use PHB4 Control Register */
        tve_sel = (addr >> 59) & 1;
        tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel];
        pnv_phb4_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret);
        break;
    case 1:
        phb_error(ds->phb, "xlate on 64-bit MSI region");
        break;
    default:
        phb_error(ds->phb, "xlate on unsupported address 0x%"PRIx64, addr);
    }
    return ret;
}
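/*
 * Worked example for pnv_phb4_translate_tve(): tps = 5 selects 64K IO
 * pages (tce_shift = 16) and tts = 3 gives tbl_shift = 11 bits per
 * level, so a two-level table (lev = 1) covers 16 + 2 * 11 = 38 bits
 * of DMA address space. Bit 59 of the untranslated address picks one
 * of the PE's two TVEs above.
 */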
#define TYPE_PNV_PHB4_IOMMU_MEMORY_REGION "pnv-phb4-iommu-memory-region"
DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB4_IOMMU_MEMORY_REGION,
                         TYPE_PNV_PHB4_IOMMU_MEMORY_REGION)
static void pnv_phb4_iommu_memory_region_class_init(ObjectClass *klass,
                                                    void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = pnv_phb4_translate_iommu;
}
static const TypeInfo pnv_phb4_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_PNV_PHB4_IOMMU_MEMORY_REGION,
    .class_init = pnv_phb4_iommu_memory_region_class_init,
};
/*
 * MSI/MSIX memory region implementation.
 * The handler handles both MSI and MSIX.
 */
static void pnv_phb4_msi_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    PnvPhb4DMASpace *ds = opaque;
    PnvPHB4 *phb = ds->phb;

    uint32_t src = ((addr >> 4) & 0xffff) | (data & 0x1f);

    /* Resolve PE# */
    if (!pnv_phb4_resolve_pe(ds)) {
        phb_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
                  ds->bus, pci_bus_num(ds->bus), ds->devfn);
        return;
    }

    /* TODO: Check it doesn't collide with LSIs */
    if (src >= phb->xsrc.nr_irqs) {
        phb_error(phb, "MSI %d out of bounds", src);
        return;
    }

    /* TODO: check PE/MSI assignment */

    qemu_irq_pulse(phb->qirqs[src]);
}
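/*
 * The source number formed above mixes the doorbell address (bits
 * 19:4) with the low 5 bits of the MSI data, so the guest's MSI
 * address/data pair directly selects a XIVE source number.
 */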
/* There is no .read as the read result is undefined by PCI spec */
static uint64_t pnv_phb4_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPhb4DMASpace *ds = opaque;

    phb_error(ds->phb, "Invalid MSI read @ 0x%" HWADDR_PRIx, addr);
    return -1;
}
static const MemoryRegionOps pnv_phb4_msi_ops = {
    .read = pnv_phb4_msi_read,
    .write = pnv_phb4_msi_write,
    .endianness = DEVICE_LITTLE_ENDIAN
};

static PnvPhb4DMASpace *pnv_phb4_dma_find(PnvPHB4 *phb, PCIBus *bus, int devfn)
{
    PnvPhb4DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        if (ds->bus == bus && ds->devfn == devfn) {
            return ds;
        }
    }
    return NULL;
}
*pnv_phb4_dma_iommu(PCIBus
*bus
, void *opaque
, int devfn
)
1123 PnvPHB4
*phb
= opaque
;
1124 PnvPhb4DMASpace
*ds
;
1127 ds
= pnv_phb4_dma_find(phb
, bus
, devfn
);
1130 ds
= g_malloc0(sizeof(PnvPhb4DMASpace
));
1133 ds
->pe_num
= PHB_INVALID_PE
;
1135 snprintf(name
, sizeof(name
), "phb4-%d.%d-iommu", phb
->chip_id
,
1137 memory_region_init_iommu(&ds
->dma_mr
, sizeof(ds
->dma_mr
),
1138 TYPE_PNV_PHB4_IOMMU_MEMORY_REGION
,
1139 OBJECT(phb
), name
, UINT64_MAX
);
1140 address_space_init(&ds
->dma_as
, MEMORY_REGION(&ds
->dma_mr
),
1142 memory_region_init_io(&ds
->msi32_mr
, OBJECT(phb
), &pnv_phb4_msi_ops
,
1143 ds
, "msi32", 0x10000);
1144 memory_region_init_io(&ds
->msi64_mr
, OBJECT(phb
), &pnv_phb4_msi_ops
,
1145 ds
, "msi64", 0x100000);
1146 pnv_phb4_update_msi_regions(ds
);
1148 QLIST_INSERT_HEAD(&phb
->dma_spaces
, ds
, list
);
1153 static void pnv_phb4_instance_init(Object
*obj
)
1155 PnvPHB4
*phb
= PNV_PHB4(obj
);
1157 QLIST_INIT(&phb
->dma_spaces
);
1159 /* XIVE interrupt source object */
1160 object_initialize_child(obj
, "source", &phb
->xsrc
, TYPE_XIVE_SOURCE
);
1163 object_initialize_child(obj
, "root", &phb
->root
, TYPE_PNV_PHB4_ROOT_PORT
);
1165 qdev_prop_set_int32(DEVICE(&phb
->root
), "addr", PCI_DEVFN(0, 0));
1166 qdev_prop_set_bit(DEVICE(&phb
->root
), "multifunction", false);
static void pnv_phb4_realize(DeviceState *dev, Error **errp)
{
    PnvPHB4 *phb = PNV_PHB4(dev);
    PCIHostState *pci = PCI_HOST_BRIDGE(dev);
    XiveSource *xsrc = &phb->xsrc;
    int nr_irqs;
    char name[32];

    /* Set the "big_phb" flag */
    phb->big_phb = phb->phb_id == 0 || phb->phb_id == 3;

    /* Controller Registers */
    snprintf(name, sizeof(name), "phb4-%d.%d-regs", phb->chip_id,
             phb->phb_id);
    memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb4_reg_ops, phb,
                          name, 0x2000);

    /*
     * PHB4 doesn't support IO space. However, qemu gets very upset if
     * we don't have an IO region to anchor IO BARs onto so we just
     * initialize one which we never hook up to anything
     */
    snprintf(name, sizeof(name), "phb4-%d.%d-pci-io", phb->chip_id,
             phb->phb_id);
    memory_region_init(&phb->pci_io, OBJECT(phb), name, 0x10000);

    snprintf(name, sizeof(name), "phb4-%d.%d-pci-mmio", phb->chip_id,
             phb->phb_id);
    memory_region_init(&phb->pci_mmio, OBJECT(phb), name,
                       PCI_MMIO_TOTAL_SIZE);

    pci->bus = pci_register_root_bus(dev, "root-bus",
                                     pnv_phb4_set_irq, pnv_phb4_map_irq, phb,
                                     &phb->pci_mmio, &phb->pci_io,
                                     0, 4, TYPE_PNV_PHB4_ROOT_BUS);
    pci_setup_iommu(pci->bus, pnv_phb4_dma_iommu, phb);

    /* Add a single Root port */
    qdev_prop_set_uint8(DEVICE(&phb->root), "chassis", phb->chip_id);
    qdev_prop_set_uint16(DEVICE(&phb->root), "slot", phb->phb_id);
    qdev_realize(DEVICE(&phb->root), BUS(pci->bus), &error_fatal);

    /* Setup XIVE Source */
    if (phb->big_phb) {
        nr_irqs = PNV_PHB4_MAX_INTs;
    } else {
        nr_irqs = PNV_PHB4_MAX_INTs >> 1;
    }
    object_property_set_int(OBJECT(xsrc), "nr-irqs", nr_irqs, &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(phb), &error_fatal);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    pnv_phb4_update_xsrc(phb);

    phb->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs);
}
static void pnv_phb4_reset(DeviceState *dev)
{
    PnvPHB4 *phb = PNV_PHB4(dev);
    PCIDevice *root_dev = PCI_DEVICE(&phb->root);

    /*
     * Configure PCI device id at reset using a property.
     */
    pci_config_set_vendor_id(root_dev->config, PCI_VENDOR_ID_IBM);
    pci_config_set_device_id(root_dev->config, phb->device_id);
}
static const char *pnv_phb4_root_bus_path(PCIHostState *host_bridge,
                                          PCIBus *rootbus)
{
    PnvPHB4 *phb = PNV_PHB4(host_bridge);

    snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x",
             phb->chip_id, phb->phb_id);
    return phb->bus_path;
}
static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno)
{
    PnvPHB4 *phb = PNV_PHB4(xf);
    uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3];
    uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];
    uint64_t data = XIVE_TRIGGER_PQ | offset | srcno;
    MemTxResult result;

    trace_pnv_phb4_xive_notify(notif_port, data);

    address_space_stq_be(&address_space_memory, notif_port, data,
                         MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        phb_error(phb, "trigger failed @%"HWADDR_PRIx"\n", notif_port);
        return;
    }
}
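/*
 * Interrupt delivery is thus a plain MMIO store: the trigger data
 * (notify index | source number) is pushed to the XIVE IC's notify
 * port read from PHB_INT_NOTIFY_ADDR.
 */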
static Property pnv_phb4_properties[] = {
    DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0),
    DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0),
    DEFINE_PROP_UINT64("version", PnvPHB4, version, 0),
    DEFINE_PROP_UINT16("device-id", PnvPHB4, device_id, 0),
    DEFINE_PROP_LINK("stack", PnvPHB4, stack, TYPE_PNV_PHB4_PEC_STACK,
                     PnvPhb4PecStack *),
    DEFINE_PROP_END_OF_LIST(),
};
static void pnv_phb4_class_init(ObjectClass *klass, void *data)
{
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass);

    hc->root_bus_path = pnv_phb4_root_bus_path;
    dc->realize = pnv_phb4_realize;
    device_class_set_props(dc, pnv_phb4_properties);
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->user_creatable = false;
    dc->reset = pnv_phb4_reset;

    xfc->notify = pnv_phb4_xive_notify;
}
static const TypeInfo pnv_phb4_type_info = {
    .name = TYPE_PNV_PHB4,
    .parent = TYPE_PCIE_HOST_BRIDGE,
    .instance_init = pnv_phb4_instance_init,
    .instance_size = sizeof(PnvPHB4),
    .class_init = pnv_phb4_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { },
    },
};
static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    /*
     * PHB4 has only a single root complex. Enforce the limit on the
     * parent bus
     */
    k->max_dev = 1;
}
static const TypeInfo pnv_phb4_root_bus_info = {
    .name = TYPE_PNV_PHB4_ROOT_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pnv_phb4_root_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { },
    },
};
static void pnv_phb4_root_port_reset(DeviceState *dev)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev);
    PCIDevice *d = PCI_DEVICE(dev);
    uint8_t *conf = d->config;

    rpc->parent_reset(dev);

    pci_byte_test_and_set_mask(conf + PCI_IO_BASE,
                               PCI_IO_RANGE_MASK & 0xff);
    pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_set_word(conf + PCI_MEMORY_BASE, 0);
    pci_set_word(conf + PCI_MEMORY_LIMIT, 0xfff0);
    pci_set_word(conf + PCI_PREF_MEMORY_BASE, 0x1);
    pci_set_word(conf + PCI_PREF_MEMORY_LIMIT, 0xfff1);
    pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0x1); /* Hack */
    pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0xffffffff);
}
static void pnv_phb4_root_port_realize(DeviceState *dev, Error **errp)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev);
    Error *local_err = NULL;

    rpc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}
*klass
, void *data
)
1364 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1365 PCIDeviceClass
*k
= PCI_DEVICE_CLASS(klass
);
1366 PCIERootPortClass
*rpc
= PCIE_ROOT_PORT_CLASS(klass
);
1368 dc
->desc
= "IBM PHB4 PCIE Root Port";
1369 dc
->user_creatable
= false;
1371 device_class_set_parent_realize(dc
, pnv_phb4_root_port_realize
,
1372 &rpc
->parent_realize
);
1373 device_class_set_parent_reset(dc
, pnv_phb4_root_port_reset
,
1374 &rpc
->parent_reset
);
1376 k
->vendor_id
= PCI_VENDOR_ID_IBM
;
1377 k
->device_id
= PNV_PHB4_DEVICE_ID
;
1380 rpc
->exp_offset
= 0x48;
1381 rpc
->aer_offset
= 0x100;
1383 dc
->reset
= &pnv_phb4_root_port_reset
;
1386 static const TypeInfo pnv_phb4_root_port_info
= {
1387 .name
= TYPE_PNV_PHB4_ROOT_PORT
,
1388 .parent
= TYPE_PCIE_ROOT_PORT
,
1389 .instance_size
= sizeof(PnvPHB4RootPort
),
1390 .class_init
= pnv_phb4_root_port_class_init
,
static void pnv_phb4_register_types(void)
{
    type_register_static(&pnv_phb4_root_bus_info);
    type_register_static(&pnv_phb4_root_port_info);
    type_register_static(&pnv_phb4_type_info);
    type_register_static(&pnv_phb4_iommu_memory_region_info);
}

type_init(pnv_phb4_register_types);
void pnv_phb4_update_regions(PnvPhb4PecStack *stack)
{
    PnvPHB4 *phb = &stack->phb;

    /* Unmap first always */
    if (memory_region_is_mapped(&phb->mr_regs)) {
        memory_region_del_subregion(&stack->phbbar, &phb->mr_regs);
    }
    if (memory_region_is_mapped(&phb->xsrc.esb_mmio)) {
        memory_region_del_subregion(&stack->intbar, &phb->xsrc.esb_mmio);
    }

    /* Map registers if enabled */
    if (memory_region_is_mapped(&stack->phbbar)) {
        memory_region_add_subregion(&stack->phbbar, 0, &phb->mr_regs);
    }

    /* Map ESB if enabled */
    if (memory_region_is_mapped(&stack->intbar)) {
        memory_region_add_subregion(&stack->intbar, 0, &phb->xsrc.esb_mmio);
    }

    /* Check/update m32 */
    pnv_phb4_check_all_mbt(phb);
}
void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon)
{
    uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];

    monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x\n",
                   phb->chip_id, phb->phb_id,
                   offset, offset + phb->xsrc.nr_irqs - 1);
    xive_source_pic_print_info(&phb->xsrc, 0, mon);
}