/*
 * QEMU PowerPC PowerNV (POWER9) PHB4 model
 *
 * Copyright (c) 2018-2020, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
#include "monitor/monitor.h"
#include "target/ppc/cpu.h"
#include "hw/pci-host/pnv_phb4_regs.h"
#include "hw/pci-host/pnv_phb4.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pcie_port.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "qom/object.h"
#include "trace.h"
#define phb_error(phb, fmt, ...)                                        \
    qemu_log_mask(LOG_GUEST_ERROR, "phb4[%d:%d]: " fmt "\n",            \
                  (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__)

#define phb_pec_error(pec, fmt, ...)                                    \
    qemu_log_mask(LOG_GUEST_ERROR, "phb4_pec[%d:%d]: " fmt "\n",        \
                  (pec)->chip_id, (pec)->index, ## __VA_ARGS__)
static PCIDevice *pnv_phb4_find_cfg_dev(PnvPHB4 *phb)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base);
    uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3];
    uint8_t bus, devfn;

    if (!(addr >> 63)) {
        return NULL;
    }
    bus = (addr >> 52) & 0xff;
    devfn = (addr >> 44) & 0xff;

    /* We don't access the root complex this way */
    if (bus == 0 && devfn == 0) {
        return NULL;
    }
    return pci_find_device(pci->bus, bus, devfn);
}
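/*
 * Editor's note (illustrative, derived from the decode above rather
 * than from the PHB4 specification text): PHB_CONFIG_ADDRESS packs
 * the config cycle enable in bit 63, the bus number in bits 52..59,
 * the devfn in bits 44..51 and the register offset in bits 32..43
 * (masked with 0xffc). For example, 0x8120300100000000 selects
 * bus 0x12, devfn 0x30, config offset 0x100.
 */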
/*
 * The CONFIG_DATA register expects little endian accesses, but as the
 * region is big endian, we have to swap the value.
 */
static void pnv_phb4_config_write(PnvPHB4 *phb, unsigned off,
                                  unsigned size, uint64_t val)
{
    uint32_t cfg_addr, limit;
    PCIDevice *pdev;

    pdev = pnv_phb4_find_cfg_dev(phb);
    if (!pdev) {
        return;
    }
    cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
    cfg_addr |= off;
    limit = pci_config_size(pdev);
    if (limit <= cfg_addr) {
        /*
         * conventional pci device can be behind pcie-to-pci bridge.
         * 256 <= addr < 4K has no effects.
         */
        return;
    }
    switch (size) {
    case 1:
        break;
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    default:
        g_assert_not_reached();
    }
    pci_host_config_write_common(pdev, cfg_addr, limit, val, size);
}
static uint64_t pnv_phb4_config_read(PnvPHB4 *phb, unsigned off,
                                     unsigned size)
{
    uint32_t cfg_addr, limit;
    PCIDevice *pdev;
    uint64_t val;

    pdev = pnv_phb4_find_cfg_dev(phb);
    if (!pdev) {
        return ~0ull;
    }
    cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
    cfg_addr |= off;
    limit = pci_config_size(pdev);
    if (limit <= cfg_addr) {
        /*
         * conventional pci device can be behind pcie-to-pci bridge.
         * 256 <= addr < 4K has no effects.
         */
        return ~0ull;
    }
    val = pci_host_config_read_common(pdev, cfg_addr, limit, size);
    switch (size) {
    case 1:
        return val;
    case 2:
        return bswap16(val);
    case 4:
        return bswap32(val);
    default:
        g_assert_not_reached();
    }
}
/*
 * Root complex register accesses are memory mapped.
 */
static void pnv_phb4_rc_config_write(PnvPHB4 *phb, unsigned off,
                                     unsigned size, uint64_t val)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base);
    PCIDevice *pdev;

    if (size != 4) {
        phb_error(phb, "rc_config_write invalid size %d", size);
        return;
    }

    pdev = pci_find_device(pci->bus, 0, 0);
    if (!pdev) {
        phb_error(phb, "rc_config_write device not found");
        return;
    }

    pci_host_config_write_common(pdev, off, PHB_RC_CONFIG_SIZE,
                                 bswap32(val), 4);
}
static uint64_t pnv_phb4_rc_config_read(PnvPHB4 *phb, unsigned off,
                                        unsigned size)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base);
    PCIDevice *pdev;
    uint64_t val;

    if (size != 4) {
        phb_error(phb, "rc_config_read invalid size %d", size);
        return ~0ull;
    }

    pdev = pci_find_device(pci->bus, 0, 0);
    if (!pdev) {
        phb_error(phb, "rc_config_read device not found");
        return ~0ull;
    }

    val = pci_host_config_read_common(pdev, off, PHB_RC_CONFIG_SIZE, 4);
    return bswap32(val);
}
static void pnv_phb4_check_mbt(PnvPHB4 *phb, uint32_t index)
{
    uint64_t base, start, size, mbe0, mbe1;
    MemoryRegion *parent;
    char name[64];

    /* Unmap first */
    if (memory_region_is_mapped(&phb->mr_mmio[index])) {
        /* Should we destroy it in RCU friendly way... ? */
        memory_region_del_subregion(phb->mr_mmio[index].container,
                                    &phb->mr_mmio[index]);
    }

    /* Get table entry */
    mbe0 = phb->ioda_MBT[(index << 1)];
    mbe1 = phb->ioda_MBT[(index << 1) + 1];

    if (!(mbe0 & IODA3_MBT0_ENABLE)) {
        return;
    }

    /* Grab geometry from registers */
    base = GETFIELD(IODA3_MBT0_BASE_ADDR, mbe0) << 12;
    size = GETFIELD(IODA3_MBT1_MASK, mbe1) << 12;
    size |= 0xff00000000000000ull;
    size = ~size + 1;

    /* Calculate PCI side start address based on M32/M64 window type */
    if (mbe0 & IODA3_MBT0_TYPE_M32) {
        start = phb->regs[PHB_M32_START_ADDR >> 3];
        if ((start + size) > 0x100000000ull) {
            phb_error(phb, "M32 set beyond 4GB boundary !");
            size = 0x100000000 - start;
        }
    } else {
        start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]);
    }

    /* TODO: Figure out how to implement/decode AOMASK */

    /* Check if it matches an enabled MMIO region in the PEC stack */
    if (memory_region_is_mapped(&phb->mmbar0) &&
        base >= phb->mmio0_base &&
        (base + size) <= (phb->mmio0_base + phb->mmio0_size)) {
        parent = &phb->mmbar0;
        base -= phb->mmio0_base;
    } else if (memory_region_is_mapped(&phb->mmbar1) &&
               base >= phb->mmio1_base &&
               (base + size) <= (phb->mmio1_base + phb->mmio1_size)) {
        parent = &phb->mmbar1;
        base -= phb->mmio1_base;
    } else {
        phb_error(phb, "PHB MBAR %d out of parent bounds", index);
        return;
    }

    /* Create alias (better name ?) */
    snprintf(name, sizeof(name), "phb4-mbar%d", index);
    memory_region_init_alias(&phb->mr_mmio[index], OBJECT(phb), name,
                             &phb->pci_mmio, start, size);
    memory_region_add_subregion(parent, base, &phb->mr_mmio[index]);
}
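/*
 * Editor's note: worked example for the size decode above
 * (illustrative values). If the shifted IODA3_MBT1_MASK field is
 * 0x00fffffff0000000, OR-ing in 0xff00000000000000 gives
 * 0xfffffffff0000000 and the two's complement (~size + 1) yields
 * 0x10000000, i.e. a 256MB window: the entry effectively stores the
 * ones' complement of (window size - 1).
 */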
static void pnv_phb4_check_all_mbt(PnvPHB4 *phb)
{
    uint64_t i;
    uint32_t num_windows = phb->big_phb ? PNV_PHB4_MAX_MMIO_WINDOWS :
        PNV_PHB4_MIN_MMIO_WINDOWS;

    for (i = 0; i < num_windows; i++) {
        pnv_phb4_check_mbt(phb, i);
    }
}
static uint64_t *pnv_phb4_ioda_access(PnvPHB4 *phb,
                                      unsigned *out_table, unsigned *out_idx)
{
    uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3];
    unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg);
    unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg);
    unsigned int mask;
    uint64_t *tptr = NULL;

    switch (table) {
    case IODA3_TBL_LIST:
        tptr = phb->ioda_LIST;
        mask = 7;
        break;
    case IODA3_TBL_MIST:
        tptr = phb->ioda_MIST;
        mask = phb->big_phb ? PNV_PHB4_MAX_MIST : (PNV_PHB4_MAX_MIST >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_RCAM:
        mask = phb->big_phb ? 127 : 63;
        break;
    case IODA3_TBL_MRT:
        mask = phb->big_phb ? 15 : 7;
        break;
    case IODA3_TBL_PESTA:
    case IODA3_TBL_PESTB:
        mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_TVT:
        tptr = phb->ioda_TVT;
        mask = phb->big_phb ? PNV_PHB4_MAX_TVEs : (PNV_PHB4_MAX_TVEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_TCR:
    case IODA3_TBL_TDR:
        mask = phb->big_phb ? 1023 : 511;
        break;
    case IODA3_TBL_MBT:
        tptr = phb->ioda_MBT;
        mask = phb->big_phb ? PNV_PHB4_MAX_MBEs : (PNV_PHB4_MAX_MBEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_MDT:
        tptr = phb->ioda_MDT;
        mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
        mask -= 1;
        break;
    case IODA3_TBL_PEEV:
        tptr = phb->ioda_PEEV;
        mask = phb->big_phb ? PNV_PHB4_MAX_PEEVs : (PNV_PHB4_MAX_PEEVs >> 1);
        mask -= 1;
        break;
    default:
        phb_error(phb, "invalid IODA table %d", table);
        return NULL;
    }

    index &= mask;
    if (out_idx) {
        *out_idx = index;
    }
    if (out_table) {
        *out_table = table;
    }
    if (tptr) {
        tptr += index;
    }
    if (adreg & PHB_IODA_AD_AUTOINC) {
        index = (index + 1) & mask;
        adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index);
    }

    phb->regs[PHB_IODA_ADDR >> 3] = adreg;
    return tptr;
}
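/*
 * Editor's note: the IODA tables are reached through an indirect
 * window. Software writes PHB_IODA_ADDR to pick a table (TSEL field)
 * and an index (TADR field), then accesses PHB_IODA_DATA0; with
 * PHB_IODA_AD_AUTOINC set the index advances on every data access,
 * so a whole table can be loaded with a plain store loop. This is a
 * sketch of the expected firmware usage, not something mandated by
 * this model.
 */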
static uint64_t pnv_phb4_ioda_read(PnvPHB4 *phb)
{
    unsigned table, idx;
    uint64_t *tptr;

    tptr = pnv_phb4_ioda_access(phb, &table, &idx);
    if (!tptr) {
        /* Special PESTA case */
        if (table == IODA3_TBL_PESTA) {
            return ((uint64_t)(phb->ioda_PEST_AB[idx] & 1)) << 63;
        } else if (table == IODA3_TBL_PESTB) {
            return ((uint64_t)(phb->ioda_PEST_AB[idx] & 2)) << 62;
        }
        /* Return 0 on unsupported tables, not ff's */
        return 0;
    }
    return *tptr;
}
static void pnv_phb4_ioda_write(PnvPHB4 *phb, uint64_t val)
{
    unsigned table, idx;
    uint64_t *tptr;

    tptr = pnv_phb4_ioda_access(phb, &table, &idx);
    if (!tptr) {
        /* Special PESTA case */
        if (table == IODA3_TBL_PESTA) {
            phb->ioda_PEST_AB[idx] &= ~1;
            phb->ioda_PEST_AB[idx] |= (val >> 63) & 1;
        } else if (table == IODA3_TBL_PESTB) {
            phb->ioda_PEST_AB[idx] &= ~2;
            phb->ioda_PEST_AB[idx] |= (val >> 62) & 2;
        }
        return;
    }

    /* Handle side effects */
    switch (table) {
    case IODA3_TBL_LIST:
        break;
    case IODA3_TBL_MIST: {
        /* Special mask for MIST partial write */
        uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3];
        uint32_t mmask = GETFIELD(PHB_IODA_AD_MIST_PWV, adreg);
        uint64_t v = *tptr;
        if (mmask == 0) {
            mmask = 0xf;
        }
        if (mmask & 8) {
            v &= 0x0000ffffffffffffull;
            v |= 0xcfff000000000000ull & val;
        }
        if (mmask & 4) {
            v &= 0xffff0000ffffffffull;
            v |= 0x0000cfff00000000ull & val;
        }
        if (mmask & 2) {
            v &= 0xffffffff0000ffffull;
            v |= 0x00000000cfff0000ull & val;
        }
        if (mmask & 1) {
            v &= 0xffffffffffff0000ull;
            v |= 0x000000000000cfffull & val;
        }
        *tptr = v;
        break;
    }
    case IODA3_TBL_MBT:
        *tptr = val;

        /* Copy across the valid bit to the other half */
        phb->ioda_MBT[idx ^ 1] &= 0x7fffffffffffffffull;
        phb->ioda_MBT[idx ^ 1] |= 0x8000000000000000ull & val;

        /* Update mappings */
        pnv_phb4_check_mbt(phb, idx >> 1);
        break;
    default:
        *tptr = val;
    }
}
static void pnv_phb4_rtc_invalidate(PnvPHB4 *phb, uint64_t val)
{
    PnvPhb4DMASpace *ds;

    /* Always invalidate all for now ... */
    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        ds->pe_num = PHB_INVALID_PE;
    }
}
static void pnv_phb4_update_msi_regions(PnvPhb4DMASpace *ds)
{
    uint64_t cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3];

    if (cfg & PHB_PHB4C_32BIT_MSI_EN) {
        if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) {
            memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
                                        0xffff0000, &ds->msi32_mr);
        }
    } else {
        if (memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) {
            memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
                                        &ds->msi32_mr);
        }
    }

    if (cfg & PHB_PHB4C_64BIT_MSI_EN) {
        if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) {
            memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
                                        (1ull << 60), &ds->msi64_mr);
        }
    } else {
        if (memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) {
            memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
                                        &ds->msi64_mr);
        }
    }
}
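/*
 * Editor's note: the MSI windows carved out of each DMA space above
 * sit at fixed locations: a 64KB 32-bit window at 0xffff0000 and a
 * 64-bit window at 1ull << 60. DMA writes landing there are treated
 * as interrupt triggers (see pnv_phb4_msi_write below) rather than
 * as memory transactions.
 */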
static void pnv_phb4_update_all_msi_regions(PnvPHB4 *phb)
{
    PnvPhb4DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        pnv_phb4_update_msi_regions(ds);
    }
}
static void pnv_phb4_update_xsrc(PnvPHB4 *phb)
{
    int shift, flags, i, lsi_base;
    XiveSource *xsrc = &phb->xsrc;

    /* The XIVE source characteristics can be set at run time */
    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PGSZ_64K) {
        shift = XIVE_ESB_64K;
    } else {
        shift = XIVE_ESB_4K;
    }
    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_STORE_EOI) {
        flags = XIVE_SRC_STORE_EOI;
    } else {
        flags = 0;
    }

    /*
     * When the PQ disable configuration bit is set, the check on the
     * PQ state bits is disabled on the PHB side (for MSI only) and it
     * is performed on the IC side instead.
     */
    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PQ_DISABLE) {
        flags |= XIVE_SRC_PQ_DISABLE;
    }

    phb->xsrc.esb_shift = shift;
    phb->xsrc.esb_flags = flags;

    lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]);
    lsi_base <<= 3;

    /* TODO: handle reset values of PHB_LSI_SRC_ID */
    if (!lsi_base) {
        return;
    }

    /* TODO: need a xive_source_irq_reset_lsi() */
    bitmap_zero(xsrc->lsi_map, xsrc->nr_irqs);

    for (i = 0; i < xsrc->nr_irqs; i++) {
        bool msi = (i < lsi_base || i >= (lsi_base + 8));
        if (!msi) {
            xive_source_irq_set_lsi(xsrc, i);
        }
    }
}
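/*
 * MMIO handlers for the PHB register space. Layout handled below:
 * accesses falling in the PHB_CONFIG_DATA word are forwarded to PCI
 * configuration space, the PHB_RC_CONFIG_BASE range maps the root
 * complex config space, and everything else must be an aligned
 * 64-bit access to a register backed by the phb->regs[] cache.
 */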
static void pnv_phb4_reg_write(void *opaque, hwaddr off, uint64_t val,
                               unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    bool changed;

    /* Special case outbound configuration data */
    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        pnv_phb4_config_write(phb, off & 0x3, size, val);
        return;
    }

    /* Special case RC configuration space */
    if ((off & 0xf800) == PHB_RC_CONFIG_BASE) {
        pnv_phb4_rc_config_write(phb, off & 0x7ff, size, val);
        return;
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                  off, size);
        return;
    }

    /* Handle masking */
    switch (off) {
    case PHB_LSI_SOURCE_ID:
        val &= PHB_LSI_SRC_ID;
        break;
    case PHB_M64_UPPER_BITS:
        val &= 0xff00000000000000ull;
        break;
    case PHB_TCE_KILL:
        /* Clear top 3 bits which HW does to indicate successful queuing */
        val &= ~(PHB_TCE_KILL_ALL | PHB_TCE_KILL_PE | PHB_TCE_KILL_ONE);
        break;
    case PHB_Q_DMA_R:
        /*
         * This is enough logic to make SW happy but we aren't
         * actually quiescing the DMAs
         */
        if (val & PHB_Q_DMA_R_AUTORESET) {
            val = 0;
        } else {
            val &= PHB_Q_DMA_R_QUIESCE_DMA;
        }
        break;
    /* LEM stuff */
    case PHB_LEM_FIR_AND_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val;
        return;
    case PHB_LEM_FIR_OR_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val;
        return;
    case PHB_LEM_ERROR_AND_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val;
        return;
    case PHB_LEM_ERROR_OR_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val;
        return;
    case PHB_LEM_WOF:
        val = 0;
        break;
    /* TODO: More regs ..., maybe create a table with masks... */

    /* Read only registers */
    case PHB_CPU_LOADSTORE_STATUS:
    case PHB_ETU_ERR_SUMMARY:
    case PHB_PHB4_GEN_CAP:
    case PHB_PHB4_TCE_CAP:
    case PHB_PHB4_IRQ_CAP:
    case PHB_PHB4_EEH_CAP:
        return;
    }

    /* Record whether it changed */
    changed = phb->regs[off >> 3] != val;

    /* Store in register cache first */
    phb->regs[off >> 3] = val;

    /* Handle side effects */
    switch (off) {
    case PHB_PHB4_CONFIG:
        if (changed) {
            pnv_phb4_update_all_msi_regions(phb);
        }
        break;
    case PHB_M32_START_ADDR:
    case PHB_M64_UPPER_BITS:
        if (changed) {
            pnv_phb4_check_all_mbt(phb);
        }
        break;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        pnv_phb4_ioda_write(phb, val);
        break;

    /* RTC invalidation */
    case PHB_RTC_INVALIDATE:
        pnv_phb4_rtc_invalidate(phb, val);
        break;

    /* PHB Control (Affects XIVE source) */
    case PHB_CTRLR:
    case PHB_LSI_SOURCE_ID:
        pnv_phb4_update_xsrc(phb);
        break;

    /* Silent simple writes */
    case PHB_ASN_CMPM:
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
    case PHB_TCE_TAG_ENABLE:
    case PHB_INT_NOTIFY_ADDR:
    case PHB_INT_NOTIFY_INDEX:
    case PHB_DMARD_SYNC:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: reg_write 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
}
static uint64_t pnv_phb4_reg_read(void *opaque, hwaddr off, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint64_t val;

    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        return pnv_phb4_config_read(phb, off & 0x3, size);
    }

    /* Special case RC configuration space */
    if ((off & 0xf800) == PHB_RC_CONFIG_BASE) {
        return pnv_phb4_rc_config_read(phb, off & 0x7ff, size);
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                  off, size);
        return ~0ull;
    }

    /* Default read from cache */
    val = phb->regs[off >> 3];

    switch (off) {
    case PHB_VERSION:
        return PNV_PHB4_PEC_GET_CLASS(phb->pec)->version;

        /* Read-only */
    case PHB_PHB4_GEN_CAP:
        return 0xe4b8000000000000ull;
    case PHB_PHB4_TCE_CAP:
        return phb->big_phb ? 0x4008440000000400ull : 0x2008440000000200ull;
    case PHB_PHB4_IRQ_CAP:
        return phb->big_phb ? 0x0800000000001000ull : 0x0800000000000800ull;
    case PHB_PHB4_EEH_CAP:
        return phb->big_phb ? 0x2000000000000000ull : 0x1000000000000000ull;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        return pnv_phb4_ioda_read(phb);

    /* Link training always appears trained */
    case PHB_PCIE_DLP_TRAIN_CTL:
        /* TODO: Do something sensible with speed ? */
        return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TL_LINKACT;

    /* DMA read sync: make it look like it's complete */
    case PHB_DMARD_SYNC:
        return PHB_DMARD_SYNC_COMPLETE;

    /* Silent simple reads */
    case PHB_LSI_SOURCE_ID:
    case PHB_CPU_LOADSTORE_STATUS:
    case PHB_ASN_CMPM:
    case PHB_PHB4_CONFIG:
    case PHB_M32_START_ADDR:
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_RTC_INVALIDATE:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_M64_UPPER_BITS:
    case PHB_CTRLR:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
    case PHB_TCE_TAG_ENABLE:
    case PHB_INT_NOTIFY_ADDR:
    case PHB_INT_NOTIFY_INDEX:
    case PHB_Q_DMA_R:
    case PHB_ETU_ERR_SUMMARY:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: reg_read 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
    return val;
}
static const MemoryRegionOps pnv_phb4_reg_ops = {
    .read = pnv_phb4_reg_read,
    .write = pnv_phb4_reg_write,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
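/*
 * Editor's note: XSCOM access to the PHB registers is mostly
 * indirect. Firmware loads PHB_SCOM_HV_IND_ADDR with a valid bit, a
 * 4-byte/8-byte select and a register offset, then accesses
 * PHB_SCOM_HV_IND_DATA, which is forwarded to pnv_phb4_reg_read and
 * pnv_phb4_reg_write below. With AUTOINC set, the offset advances by
 * the access size (wrapping at 0x3fff) after each data access.
 */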
static uint64_t pnv_phb4_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;
    uint64_t val;
    hwaddr offset;

    switch (reg) {
    case PHB_SCOM_HV_IND_ADDR:
        return phb->scom_hv_ind_addr_reg;

    case PHB_SCOM_HV_IND_DATA:
        if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) {
            phb_error(phb, "Invalid indirect address");
            return ~0ull;
        }
        size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 4 : 8;
        offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg);
        val = pnv_phb4_reg_read(phb, offset, size);
        if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) {
            offset += size;
            offset &= 0x3fff;
            phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR,
                                                 phb->scom_hv_ind_addr_reg,
                                                 offset);
        }
        return val;
    case PHB_SCOM_ETU_LEM_FIR:
    case PHB_SCOM_ETU_LEM_FIR_AND:
    case PHB_SCOM_ETU_LEM_FIR_OR:
    case PHB_SCOM_ETU_LEM_FIR_MSK:
    case PHB_SCOM_ETU_LEM_ERR_MSK_AND:
    case PHB_SCOM_ETU_LEM_ERR_MSK_OR:
    case PHB_SCOM_ETU_LEM_ACT0:
    case PHB_SCOM_ETU_LEM_ACT1:
    case PHB_SCOM_ETU_LEM_WOF:
        offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM;
        return pnv_phb4_reg_read(phb, offset, size);
    case PHB_SCOM_ETU_PMON_CONFIG:
    case PHB_SCOM_ETU_PMON_CTR0:
    case PHB_SCOM_ETU_PMON_CTR1:
    case PHB_SCOM_ETU_PMON_CTR2:
    case PHB_SCOM_ETU_PMON_CTR3:
        offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG;
        return pnv_phb4_reg_read(phb, offset, size);

    default:
        qemu_log_mask(LOG_UNIMP, "phb4: xscom_read 0x%"HWADDR_PRIx"\n", addr);
        return ~0ull;
    }
}
static void pnv_phb4_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;
    hwaddr offset;

    switch (reg) {
    case PHB_SCOM_HV_IND_ADDR:
        phb->scom_hv_ind_addr_reg = val & 0xe000000000001fff;
        break;
    case PHB_SCOM_HV_IND_DATA:
        if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) {
            phb_error(phb, "Invalid indirect address");
            break;
        }
        size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 4 : 8;
        offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg);
        pnv_phb4_reg_write(phb, offset, val, size);
        if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) {
            offset += size;
            offset &= 0x3fff;
            phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR,
                                                 phb->scom_hv_ind_addr_reg,
                                                 offset);
        }
        break;
    case PHB_SCOM_ETU_LEM_FIR:
    case PHB_SCOM_ETU_LEM_FIR_AND:
    case PHB_SCOM_ETU_LEM_FIR_OR:
    case PHB_SCOM_ETU_LEM_FIR_MSK:
    case PHB_SCOM_ETU_LEM_ERR_MSK_AND:
    case PHB_SCOM_ETU_LEM_ERR_MSK_OR:
    case PHB_SCOM_ETU_LEM_ACT0:
    case PHB_SCOM_ETU_LEM_ACT1:
    case PHB_SCOM_ETU_LEM_WOF:
        offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM;
        pnv_phb4_reg_write(phb, offset, val, size);
        break;
    case PHB_SCOM_ETU_PMON_CONFIG:
    case PHB_SCOM_ETU_PMON_CTR0:
    case PHB_SCOM_ETU_PMON_CTR1:
    case PHB_SCOM_ETU_PMON_CTR2:
    case PHB_SCOM_ETU_PMON_CTR3:
        offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG;
        pnv_phb4_reg_write(phb, offset, val, size);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "phb4: xscom_write 0x%"HWADDR_PRIx
                      "=%"PRIx64"\n", addr, val);
    }
}
const MemoryRegionOps pnv_phb4_xscom_ops = {
    .read = pnv_phb4_xscom_read,
    .write = pnv_phb4_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
static uint64_t pnv_pec_stk_nest_xscom_read(void *opaque, hwaddr addr,
                                            unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;

    /* All registers are readable */
    return phb->nest_regs[reg];
}
/*
 * Return the 'stack_no' of a PHB4. 'stack_no' is the order
 * the PHB4 occupies in the PEC. This is the reverse of what
 * pnv_phb4_pec_get_phb_id() does.
 *
 * E.g. a phb with phb_id = 4 and pec->index = 1 (PEC1) will
 * be the second phb (stack_no = 1) of the PEC.
 */
static int pnv_phb4_get_phb_stack_no(PnvPHB4 *phb)
{
    PnvPhb4PecState *pec = phb->pec;
    PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
    int index = pec->index;
    int stack_no = phb->phb_id;

    while (index--) {
        stack_no -= pecc->num_phbs[index];
    }

    return stack_no;
}
static void pnv_phb4_update_regions(PnvPHB4 *phb)
{
    /* Unmap first always */
    if (memory_region_is_mapped(&phb->mr_regs)) {
        memory_region_del_subregion(&phb->phbbar, &phb->mr_regs);
    }
    if (memory_region_is_mapped(&phb->xsrc.esb_mmio)) {
        memory_region_del_subregion(&phb->intbar, &phb->xsrc.esb_mmio);
    }

    /* Map registers if enabled */
    if (memory_region_is_mapped(&phb->phbbar)) {
        memory_region_add_subregion(&phb->phbbar, 0, &phb->mr_regs);
    }

    /* Map ESB if enabled */
    if (memory_region_is_mapped(&phb->intbar)) {
        memory_region_add_subregion(&phb->intbar, 0, &phb->xsrc.esb_mmio);
    }

    /* Check/update m32 */
    pnv_phb4_check_all_mbt(phb);
}
static void pnv_pec_phb_update_map(PnvPHB4 *phb)
{
    PnvPhb4PecState *pec = phb->pec;
    MemoryRegion *sysmem = get_system_memory();
    uint64_t bar_en = phb->nest_regs[PEC_NEST_STK_BAR_EN];
    int stack_no = pnv_phb4_get_phb_stack_no(phb);
    uint64_t bar, mask, size;
    char name[64];

    /*
     * NOTE: This will really not work well if those are remapped
     * after the PHB has created its sub regions. We could do better
     * if we had a way to resize regions but we don't really care
     * that much in practice as the stuff below really only happens
     * once early during boot
     */

    /* Handle unmaps */
    if (memory_region_is_mapped(&phb->mmbar0) &&
        !(bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) {
        memory_region_del_subregion(sysmem, &phb->mmbar0);
    }
    if (memory_region_is_mapped(&phb->mmbar1) &&
        !(bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) {
        memory_region_del_subregion(sysmem, &phb->mmbar1);
    }
    if (memory_region_is_mapped(&phb->phbbar) &&
        !(bar_en & PEC_NEST_STK_BAR_EN_PHB)) {
        memory_region_del_subregion(sysmem, &phb->phbbar);
    }
    if (memory_region_is_mapped(&phb->intbar) &&
        !(bar_en & PEC_NEST_STK_BAR_EN_INT)) {
        memory_region_del_subregion(sysmem, &phb->intbar);
    }

    /* Update PHB */
    pnv_phb4_update_regions(phb);

    /* Handle maps */
    if (!memory_region_is_mapped(&phb->mmbar0) &&
        (bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) {
        bar = phb->nest_regs[PEC_NEST_STK_MMIO_BAR0] >> 8;
        mask = phb->nest_regs[PEC_NEST_STK_MMIO_BAR0_MASK];
        size = ((~mask) >> 8) + 1;
        snprintf(name, sizeof(name), "pec-%d.%d-phb-%d-mmio0",
                 pec->chip_id, pec->index, stack_no);
        memory_region_init(&phb->mmbar0, OBJECT(phb), name, size);
        memory_region_add_subregion(sysmem, bar, &phb->mmbar0);
        phb->mmio0_base = bar;
        phb->mmio0_size = size;
    }
    if (!memory_region_is_mapped(&phb->mmbar1) &&
        (bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) {
        bar = phb->nest_regs[PEC_NEST_STK_MMIO_BAR1] >> 8;
        mask = phb->nest_regs[PEC_NEST_STK_MMIO_BAR1_MASK];
        size = ((~mask) >> 8) + 1;
        snprintf(name, sizeof(name), "pec-%d.%d-phb-%d-mmio1",
                 pec->chip_id, pec->index, stack_no);
        memory_region_init(&phb->mmbar1, OBJECT(phb), name, size);
        memory_region_add_subregion(sysmem, bar, &phb->mmbar1);
        phb->mmio1_base = bar;
        phb->mmio1_size = size;
    }
    if (!memory_region_is_mapped(&phb->phbbar) &&
        (bar_en & PEC_NEST_STK_BAR_EN_PHB)) {
        bar = phb->nest_regs[PEC_NEST_STK_PHB_REGS_BAR] >> 8;
        size = PNV_PHB4_NUM_REGS << 3;
        snprintf(name, sizeof(name), "pec-%d.%d-phb-%d",
                 pec->chip_id, pec->index, stack_no);
        memory_region_init(&phb->phbbar, OBJECT(phb), name, size);
        memory_region_add_subregion(sysmem, bar, &phb->phbbar);
    }
    if (!memory_region_is_mapped(&phb->intbar) &&
        (bar_en & PEC_NEST_STK_BAR_EN_INT)) {
        bar = phb->nest_regs[PEC_NEST_STK_INT_BAR] >> 8;
        size = PNV_PHB4_MAX_INTs << 16;
        snprintf(name, sizeof(name), "pec-%d.%d-phb-%d-int",
                 phb->pec->chip_id, phb->pec->index, stack_no);
        memory_region_init(&phb->intbar, OBJECT(phb), name, size);
        memory_region_add_subregion(sysmem, bar, &phb->intbar);
    }

    /* Update PHB */
    pnv_phb4_update_regions(phb);
}
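/*
 * Editor's note on the BAR decode above: the nest BAR registers hold
 * the address in bits 8 and up (hence 'bar = reg >> 8') and the MMIO
 * mask registers hold the decoded address bits, so
 * 'size = ((~mask) >> 8) + 1' recovers the window size. Remapping a
 * BAR that is still enabled in PEC_NEST_STK_BAR_EN is rejected by
 * the XSCOM write handler below.
 */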
static void pnv_pec_stk_nest_xscom_write(void *opaque, hwaddr addr,
                                         uint64_t val, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    PnvPhb4PecState *pec = phb->pec;
    uint32_t reg = addr >> 3;

    switch (reg) {
    case PEC_NEST_STK_PCI_NEST_FIR:
        phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] = val & PPC_BITMASK(0, 27);
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_CLR:
        phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] &= val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_SET:
        phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] |= val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_MSK:
        phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] = val &
                                                        PPC_BITMASK(0, 27);
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_MSKC:
        phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] &= val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_MSKS:
        phb->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] |= val;
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_ACT0:
    case PEC_NEST_STK_PCI_NEST_FIR_ACT1:
        phb->nest_regs[reg] = val & PPC_BITMASK(0, 27);
        break;
    case PEC_NEST_STK_PCI_NEST_FIR_WOF:
        phb->nest_regs[reg] = 0;
        break;
    case PEC_NEST_STK_ERR_REPORT_0:
    case PEC_NEST_STK_ERR_REPORT_1:
    case PEC_NEST_STK_PBCQ_GNRL_STATUS:
        /* Flag error ? */
        break;
    case PEC_NEST_STK_PBCQ_MODE:
        phb->nest_regs[reg] = val & PPC_BITMASK(0, 7);
        break;
    case PEC_NEST_STK_MMIO_BAR0:
    case PEC_NEST_STK_MMIO_BAR0_MASK:
    case PEC_NEST_STK_MMIO_BAR1:
    case PEC_NEST_STK_MMIO_BAR1_MASK:
        if (phb->nest_regs[PEC_NEST_STK_BAR_EN] &
            (PEC_NEST_STK_BAR_EN_MMIO0 |
             PEC_NEST_STK_BAR_EN_MMIO1)) {
            phb_pec_error(pec, "Changing enabled BAR unsupported");
        }
        phb->nest_regs[reg] = val & PPC_BITMASK(0, 39);
        break;
    case PEC_NEST_STK_PHB_REGS_BAR:
        if (phb->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_PHB) {
            phb_pec_error(pec, "Changing enabled BAR unsupported");
        }
        phb->nest_regs[reg] = val & PPC_BITMASK(0, 41);
        break;
    case PEC_NEST_STK_INT_BAR:
        if (phb->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_INT) {
            phb_pec_error(pec, "Changing enabled BAR unsupported");
        }
        phb->nest_regs[reg] = val & PPC_BITMASK(0, 27);
        break;
    case PEC_NEST_STK_BAR_EN:
        phb->nest_regs[reg] = val & PPC_BITMASK(0, 3);
        pnv_pec_phb_update_map(phb);
        break;
    case PEC_NEST_STK_DATA_FRZ_TYPE:
        /* Not used for now */
        phb->nest_regs[reg] = val & PPC_BITMASK(0, 27);
        break;
    case PEC_NEST_STK_PBCQ_SPARSE_PAGE:
        phb->nest_regs[reg] = val & PPC_BITMASK(3, 5);
        break;
    case PEC_NEST_STK_PBCQ_CACHE_INJ:
        phb->nest_regs[reg] = val & PPC_BITMASK(0, 7);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "phb4_pec: nest_xscom_write 0x%"HWADDR_PRIx
                      "=%"PRIx64"\n", addr, val);
    }
}
static const MemoryRegionOps pnv_pec_stk_nest_xscom_ops = {
    .read = pnv_pec_stk_nest_xscom_read,
    .write = pnv_pec_stk_nest_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
static uint64_t pnv_pec_stk_pci_xscom_read(void *opaque, hwaddr addr,
                                           unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;

    /* All registers are readable */
    return phb->pci_regs[reg];
}
static void pnv_pec_stk_pci_xscom_write(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t reg = addr >> 3;

    switch (reg) {
    case PEC_PCI_STK_PCI_FIR:
        phb->pci_regs[reg] = val & PPC_BITMASK(0, 5);
        break;
    case PEC_PCI_STK_PCI_FIR_CLR:
        phb->pci_regs[PEC_PCI_STK_PCI_FIR] &= val;
        break;
    case PEC_PCI_STK_PCI_FIR_SET:
        phb->pci_regs[PEC_PCI_STK_PCI_FIR] |= val;
        break;
    case PEC_PCI_STK_PCI_FIR_MSK:
        phb->pci_regs[reg] = val & PPC_BITMASK(0, 5);
        break;
    case PEC_PCI_STK_PCI_FIR_MSKC:
        phb->pci_regs[PEC_PCI_STK_PCI_FIR_MSK] &= val;
        break;
    case PEC_PCI_STK_PCI_FIR_MSKS:
        phb->pci_regs[PEC_PCI_STK_PCI_FIR_MSK] |= val;
        break;
    case PEC_PCI_STK_PCI_FIR_ACT0:
    case PEC_PCI_STK_PCI_FIR_ACT1:
        phb->pci_regs[reg] = val & PPC_BITMASK(0, 5);
        break;
    case PEC_PCI_STK_PCI_FIR_WOF:
        phb->pci_regs[reg] = 0;
        break;
    case PEC_PCI_STK_ETU_RESET:
        phb->pci_regs[reg] = val & PPC_BIT(0);
        /* TODO: Implement reset */
        break;
    case PEC_PCI_STK_PBAIB_ERR_REPORT:
        break;
    case PEC_PCI_STK_PBAIB_TX_CMD_CRED:
        phb->pci_regs[reg] = val &
                             ((PPC_BITMASK(0, 2) | PPC_BITMASK(10, 18)
                              | PPC_BITMASK(26, 34) | PPC_BITMASK(41, 50)
                              | PPC_BITMASK(58, 63)));
        break;
    case PEC_PCI_STK_PBAIB_TX_DAT_CRED:
        phb->pci_regs[reg] = val & (PPC_BITMASK(33, 34) | PPC_BITMASK(44, 47));
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "phb4_pec_stk: pci_xscom_write 0x%"HWADDR_PRIx
                      "=%"PRIx64"\n", addr, val);
    }
}
static const MemoryRegionOps pnv_pec_stk_pci_xscom_ops = {
    .read = pnv_pec_stk_pci_xscom_read,
    .write = pnv_pec_stk_pci_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
static int pnv_phb4_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* Check that out properly ... */
    return irq_num % 4;
}
static void pnv_phb4_set_irq(void *opaque, int irq_num, int level)
{
    PnvPHB4 *phb = PNV_PHB4(opaque);
    uint32_t lsi_base;

    /* LSI only ... */
    if (irq_num > 3) {
        phb_error(phb, "IRQ %x is not an LSI", irq_num);
    }
    lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]);
    lsi_base <<= 3;
    qemu_set_irq(phb->qirqs[lsi_base + irq_num], level);
}
static bool pnv_phb4_resolve_pe(PnvPhb4DMASpace *ds)
{
    uint64_t rtt, addr;
    uint16_t rte;
    int bus_num;
    int num_PEs;

    /* Already resolved ? */
    if (ds->pe_num != PHB_INVALID_PE) {
        return true;
    }

    /* We need to lookup the RTT */
    rtt = ds->phb->regs[PHB_RTT_BAR >> 3];
    if (!(rtt & PHB_RTT_BAR_ENABLE)) {
        phb_error(ds->phb, "DMA with RTT BAR disabled !");
        /* Set error bits ? fence ? ... */
        return false;
    }

    /* Read RTE */
    bus_num = pci_bus_num(ds->bus);
    addr = rtt & PHB_RTT_BASE_ADDRESS_MASK;
    addr += 2 * PCI_BUILD_BDF(bus_num, ds->devfn);
    if (dma_memory_read(&address_space_memory, addr, &rte,
                        sizeof(rte), MEMTXATTRS_UNSPECIFIED)) {
        phb_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr);
        /* Set error bits ? fence ? ... */
        return false;
    }
    rte = be16_to_cpu(rte);

    /* Fail upon reading of invalid PE# */
    num_PEs = ds->phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
    if (rte >= num_PEs) {
        phb_error(ds->phb, "RTE for RID 0x%x invalid (%04x", ds->devfn, rte);
        rte &= num_PEs - 1;
    }
    ds->pe_num = rte;
    return true;
}
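/*
 * Editor's note: the RTT is a flat table of 16-bit big-endian PE
 * numbers indexed by RID, which is why the lookup above reads two
 * bytes at 'RTT base + 2 * BDF'. For example (illustrative values),
 * devfn 0x08 on bus 1 has BDF 0x108 and is looked up at offset
 * 0x210 into the table.
 */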
static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr,
                                   bool is_write, uint64_t tve,
                                   IOMMUTLBEntry *tlb)
{
    uint64_t tta = GETFIELD(IODA3_TVT_TABLE_ADDR, tve);
    int32_t lev = GETFIELD(IODA3_TVT_NUM_LEVELS, tve);
    uint32_t tts = GETFIELD(IODA3_TVT_TCE_TABLE_SIZE, tve);
    uint32_t tps = GETFIELD(IODA3_TVT_IO_PSIZE, tve);

    /* Invalid levels */
    if (lev > 4) {
        phb_error(ds->phb, "Invalid #levels in TVE %d", lev);
        return;
    }

    /* Invalid entry */
    if (!tts) {
        phb_error(ds->phb, "Access to invalid TVE");
        return;
    }

    /* IO Page Size of 0 means untranslated, else use TCEs */
    if (tps == 0) {
        /* TODO: Handle boundaries */

        /* Use 4k pages like q35 ... for now */
        tlb->iova = addr & 0xfffffffffffff000ull;
        tlb->translated_addr = addr & 0x0003fffffffff000ull;
        tlb->addr_mask = 0xfffull;
        tlb->perm = IOMMU_RW;
    } else {
        uint32_t tce_shift, tbl_shift, sh;
        uint64_t base, taddr, tce, tce_mask;

        /* Address bits per bottom level TCE entry */
        tce_shift = tps + 11;

        /* Address bits per table level */
        tbl_shift = tts + 8;

        /* Top level table base address */
        base = tta << 12;

        /* Total shift to first level */
        sh = tbl_shift * lev + tce_shift;

        /* TODO: Limit to support IO page sizes */

        /* TODO: Multi-level untested */
        while ((lev--) >= 0) {
            /* Grab the TCE address */
            taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
            if (dma_memory_read(&address_space_memory, taddr, &tce,
                                sizeof(tce), MEMTXATTRS_UNSPECIFIED)) {
                phb_error(ds->phb, "Failed to read TCE at 0x%"PRIx64, taddr);
                return;
            }
            tce = be64_to_cpu(tce);

            /* Check permission for indirect TCE */
            if ((lev >= 0) && !(tce & 3)) {
                phb_error(ds->phb, "Invalid indirect TCE at 0x%"PRIx64, taddr);
                phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
                          is_write ? 'W' : 'R', tve);
                phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
                          tta, lev, tts, tps);
                return;
            }
            sh -= tbl_shift;
            base = tce & ~0xfffull;
        }

        /* We exit the loop with TCE being the final TCE */
        if ((is_write & !(tce & 2)) || ((!is_write) && !(tce & 1))) {
            phb_error(ds->phb, "TCE access fault at 0x%"PRIx64, taddr);
            phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
                      is_write ? 'W' : 'R', tve);
            phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
                      tta, lev, tts, tps);
            return;
        }
        tce_mask = ~((1ull << tce_shift) - 1);
        tlb->iova = addr & tce_mask;
        tlb->translated_addr = tce & tce_mask;
        tlb->addr_mask = ~tce_mask;
        tlb->perm = tce & 3;
    }
}
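/*
 * Editor's note: sketch of the TVE geometry used above (values
 * illustrative). A bottom-level TCE maps 1 << (tps + 11) bytes, so
 * tps = 1 gives 4K pages and tps = 5 gives 64K pages; each table
 * level holds 1 << (tts + 8) eight-byte entries. The walk starts at
 * 'tta << 12' and consumes tbl_shift address bits per level until
 * the leaf TCE supplies the real address plus the R/W permission
 * bits 0 and 1.
 */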
static IOMMUTLBEntry pnv_phb4_translate_iommu(IOMMUMemoryRegion *iommu,
                                              hwaddr addr,
                                              IOMMUAccessFlags flag,
                                              int iommu_idx)
{
    PnvPhb4DMASpace *ds = container_of(iommu, PnvPhb4DMASpace, dma_mr);
    int tve_sel;
    uint64_t tve, cfg;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    /* Resolve PE# */
    if (!pnv_phb4_resolve_pe(ds)) {
        phb_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
                  ds->bus, pci_bus_num(ds->bus), ds->devfn);
        return ret;
    }

    /* Check top bits */
    switch (addr >> 60) {
    case 00:
        /* DMA or 32-bit MSI ? */
        cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3];
        if ((cfg & PHB_PHB4C_32BIT_MSI_EN) &&
            ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) {
            phb_error(ds->phb, "xlate on 32-bit MSI region");
            return ret;
        }
        /* Choose TVE XXX Use PHB4 Control Register */
        tve_sel = (addr >> 59) & 1;
        tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel];
        pnv_phb4_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret);
        break;
    case 01:
        phb_error(ds->phb, "xlate on 64-bit MSI region");
        break;
    default:
        phb_error(ds->phb, "xlate on unsupported address 0x%"PRIx64, addr);
    }
    return ret;
}
#define TYPE_PNV_PHB4_IOMMU_MEMORY_REGION "pnv-phb4-iommu-memory-region"
DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB4_IOMMU_MEMORY_REGION,
                         TYPE_PNV_PHB4_IOMMU_MEMORY_REGION)

static void pnv_phb4_iommu_memory_region_class_init(ObjectClass *klass,
                                                    void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = pnv_phb4_translate_iommu;
}

static const TypeInfo pnv_phb4_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_PNV_PHB4_IOMMU_MEMORY_REGION,
    .class_init = pnv_phb4_iommu_memory_region_class_init,
};
/*
 * Return the index/phb-id of a PHB4 that belongs to a
 * pec->stacks[stack_index] stack.
 */
int pnv_phb4_pec_get_phb_id(PnvPhb4PecState *pec, int stack_index)
{
    PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
    int index = pec->index;
    int offset = 0;

    while (index--) {
        offset += pecc->num_phbs[index];
    }

    return offset + stack_index;
}
/*
 * MSI/MSIX memory region implementation.
 * The handler handles both MSI and MSIX.
 */
static void pnv_phb4_msi_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    PnvPhb4DMASpace *ds = opaque;
    PnvPHB4 *phb = ds->phb;

    uint32_t src = ((addr >> 4) & 0xffff) | (data & 0x1f);

    /* Resolve PE# */
    if (!pnv_phb4_resolve_pe(ds)) {
        phb_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
                  ds->bus, pci_bus_num(ds->bus), ds->devfn);
        return;
    }

    /* TODO: Check it doesn't collide with LSIs */
    if (src >= phb->xsrc.nr_irqs) {
        phb_error(phb, "MSI %d out of bounds", src);
        return;
    }

    /* TODO: check PE/MSI assignment */

    qemu_irq_pulse(phb->qirqs[src]);
}
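/*
 * Editor's note: the source number decode above takes bits 4..19 of
 * the MSI address and ORs in the low 5 bits of the MSI data. For
 * example (illustrative values), a write of data 0x3 at offset 0x120
 * into the window yields src = ((0x120 >> 4) & 0xffff) | 0x3 = 0x13.
 */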
/* There is no .read as the read result is undefined by PCI spec */
static uint64_t pnv_phb4_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPhb4DMASpace *ds = opaque;

    phb_error(ds->phb, "Invalid MSI read @ 0x%" HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_phb4_msi_ops = {
    .read = pnv_phb4_msi_read,
    .write = pnv_phb4_msi_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static PnvPhb4DMASpace *pnv_phb4_dma_find(PnvPHB4 *phb, PCIBus *bus, int devfn)
{
    PnvPhb4DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        if (ds->bus == bus && ds->devfn == devfn) {
            break;
        }
    }
    return ds;
}
static AddressSpace *pnv_phb4_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    PnvPHB4 *phb = opaque;
    PnvPhb4DMASpace *ds;
    char name[32];

    ds = pnv_phb4_dma_find(phb, bus, devfn);

    if (ds == NULL) {
        ds = g_new0(PnvPhb4DMASpace, 1);
        ds->bus = bus;
        ds->devfn = devfn;
        ds->pe_num = PHB_INVALID_PE;
        ds->phb = phb;
        snprintf(name, sizeof(name), "phb4-%d.%d-iommu", phb->chip_id,
                 phb->phb_id);
        memory_region_init_iommu(&ds->dma_mr, sizeof(ds->dma_mr),
                                 TYPE_PNV_PHB4_IOMMU_MEMORY_REGION,
                                 OBJECT(phb), name, UINT64_MAX);
        address_space_init(&ds->dma_as, MEMORY_REGION(&ds->dma_mr),
                           name);
        memory_region_init_io(&ds->msi32_mr, OBJECT(phb), &pnv_phb4_msi_ops,
                              ds, "msi32", 0x10000);
        memory_region_init_io(&ds->msi64_mr, OBJECT(phb), &pnv_phb4_msi_ops,
                              ds, "msi64", 0x100000);
        pnv_phb4_update_msi_regions(ds);

        QLIST_INSERT_HEAD(&phb->dma_spaces, ds, list);
    }
    return &ds->dma_as;
}
static void pnv_phb4_xscom_realize(PnvPHB4 *phb)
{
    PnvPhb4PecState *pec = phb->pec;
    PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
    int stack_no = pnv_phb4_get_phb_stack_no(phb);
    uint32_t pec_nest_base;
    uint32_t pec_pci_base;
    char name[64];

    assert(pec);

    /* Initialize the XSCOM regions for the stack registers */
    snprintf(name, sizeof(name), "xscom-pec-%d.%d-nest-phb-%d",
             pec->chip_id, pec->index, stack_no);
    pnv_xscom_region_init(&phb->nest_regs_mr, OBJECT(phb),
                          &pnv_pec_stk_nest_xscom_ops, phb, name,
                          PHB4_PEC_NEST_STK_REGS_COUNT);

    snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-phb-%d",
             pec->chip_id, pec->index, stack_no);
    pnv_xscom_region_init(&phb->pci_regs_mr, OBJECT(phb),
                          &pnv_pec_stk_pci_xscom_ops, phb, name,
                          PHB4_PEC_PCI_STK_REGS_COUNT);

    /* PHB pass-through */
    snprintf(name, sizeof(name), "xscom-pec-%d.%d-phb-%d",
             pec->chip_id, pec->index, stack_no);
    pnv_xscom_region_init(&phb->phb_regs_mr, OBJECT(phb),
                          &pnv_phb4_xscom_ops, phb, name, 0x40);

    pec_nest_base = pecc->xscom_nest_base(pec);
    pec_pci_base = pecc->xscom_pci_base(pec);

    /* Populate the XSCOM address space. */
    pnv_xscom_add_subregion(pec->chip,
                            pec_nest_base + 0x40 * (stack_no + 1),
                            &phb->nest_regs_mr);
    pnv_xscom_add_subregion(pec->chip,
                            pec_pci_base + 0x40 * (stack_no + 1),
                            &phb->pci_regs_mr);
    pnv_xscom_add_subregion(pec->chip,
                            pec_pci_base + PNV9_XSCOM_PEC_PCI_STK0 +
                            0x40 * stack_no,
                            &phb->phb_regs_mr);
}
static PCIIOMMUOps pnv_phb4_iommu_ops = {
    .get_address_space = pnv_phb4_dma_iommu,
};

static void pnv_phb4_instance_init(Object *obj)
{
    PnvPHB4 *phb = PNV_PHB4(obj);

    QLIST_INIT(&phb->dma_spaces);

    /* XIVE interrupt source object */
    object_initialize_child(obj, "source", &phb->xsrc, TYPE_XIVE_SOURCE);
}
void pnv_phb4_bus_init(DeviceState *dev, PnvPHB4 *phb)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(dev);
    char name[32];

    /*
     * PHB4 doesn't support IO space. However, qemu gets very upset if
     * we don't have an IO region to anchor IO BARs onto so we just
     * initialize one which we never hook up to anything
     */
    snprintf(name, sizeof(name), "phb4-%d.%d-pci-io", phb->chip_id,
             phb->phb_id);
    memory_region_init(&phb->pci_io, OBJECT(phb), name, 0x10000);

    snprintf(name, sizeof(name), "phb4-%d.%d-pci-mmio", phb->chip_id,
             phb->phb_id);
    memory_region_init(&phb->pci_mmio, OBJECT(phb), name,
                       PCI_MMIO_TOTAL_SIZE);

    pci->bus = pci_register_root_bus(dev, dev->id ? dev->id : NULL,
                                     pnv_phb4_set_irq, pnv_phb4_map_irq, phb,
                                     &phb->pci_mmio, &phb->pci_io,
                                     0, 4, TYPE_PNV_PHB4_ROOT_BUS);

    object_property_set_int(OBJECT(pci->bus), "phb-id", phb->phb_id,
                            &error_abort);
    object_property_set_int(OBJECT(pci->bus), "chip-id", phb->chip_id,
                            &error_abort);

    pci_setup_iommu(pci->bus, &pnv_phb4_iommu_ops, phb);
    pci->bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
}
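/*
 * Editor's note: PHBs 0 and 3 of a chip are modelled as "big" PHBs;
 * realize below sets phb->big_phb from the PHB id, and the flag then
 * doubles table sizes and interrupt counts throughout this file (see
 * the big_phb ? ... : ... selections).
 */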
static void pnv_phb4_realize(DeviceState *dev, Error **errp)
{
    PnvPHB4 *phb = PNV_PHB4(dev);
    XiveSource *xsrc = &phb->xsrc;
    int nr_irqs;
    char name[32];

    /* Set the "big_phb" flag */
    phb->big_phb = phb->phb_id == 0 || phb->phb_id == 3;

    /* Controller Registers */
    snprintf(name, sizeof(name), "phb4-%d.%d-regs", phb->chip_id,
             phb->phb_id);
    memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb4_reg_ops, phb,
                          name, 0x2000);

    /* Setup XIVE Source */
    if (phb->big_phb) {
        nr_irqs = PNV_PHB4_MAX_INTs;
    } else {
        nr_irqs = PNV_PHB4_MAX_INTs >> 1;
    }
    object_property_set_int(OBJECT(xsrc), "nr-irqs", nr_irqs, &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(phb), &error_fatal);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    pnv_phb4_update_xsrc(phb);

    phb->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs);

    pnv_phb4_xscom_realize(phb);
}
/*
 * Address base trigger mode (POWER10)
 *
 * Trigger directly the IC ESB page
 */
static void pnv_phb4_xive_notify_abt(PnvPHB4 *phb, uint32_t srcno,
                                     bool pq_checked)
{
    uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3];
    uint64_t data = 0; /* trigger data : don't care */
    hwaddr addr;
    MemTxResult result;
    int esb_shift;

    if (notif_port & PHB_INT_NOTIFY_ADDR_64K) {
        esb_shift = 16;
    } else {
        esb_shift = 12;
    }

    /* Compute the address of the IC ESB management page */
    addr = (notif_port & ~PHB_INT_NOTIFY_ADDR_64K);
    addr |= (1ull << (esb_shift + 1)) * srcno;
    addr |= (1ull << esb_shift);

    /*
     * When the PQ state bits are checked on the PHB, the associated
     * PQ state bits on the IC should be ignored. Use the unconditional
     * trigger offset to inject a trigger on the IC. This is always
     * the case for LSIs
     */
    if (pq_checked) {
        addr |= XIVE_ESB_INJECT;
    }

    trace_pnv_phb4_xive_notify_ic(addr, data);

    address_space_stq_be(&address_space_memory, addr, data,
                         MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        phb_error(phb, "trigger failed @%"HWADDR_PRIx "\n", addr);
        return;
    }
}
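/*
 * Editor's note (an interpretation of the address math above): each
 * source owns a pair of ESB pages on the IC, so the address steps by
 * 2 * (1 << esb_shift) per source number, and setting bit esb_shift
 * selects the management page of the pair. The XIVE_ESB_INJECT
 * offset is used when the PHB has already checked the PQ bits, so
 * the IC must not check them again.
 */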
static void pnv_phb4_xive_notify_ic(PnvPHB4 *phb, uint32_t srcno,
                                    bool pq_checked)
{
    uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3];
    uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];
    uint64_t data = offset | srcno;
    MemTxResult result;

    if (pq_checked) {
        data |= XIVE_TRIGGER_PQ;
    }

    trace_pnv_phb4_xive_notify_ic(notif_port, data);

    address_space_stq_be(&address_space_memory, notif_port, data,
                         MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        phb_error(phb, "trigger failed @%"HWADDR_PRIx "\n", notif_port);
        return;
    }
}
static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno,
                                 bool pq_checked)
{
    PnvPHB4 *phb = PNV_PHB4(xf);

    if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_ABT_MODE) {
        pnv_phb4_xive_notify_abt(phb, srcno, pq_checked);
    } else {
        pnv_phb4_xive_notify_ic(phb, srcno, pq_checked);
    }
}
static Property pnv_phb4_properties[] = {
    DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0),
    DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0),
    DEFINE_PROP_LINK("pec", PnvPHB4, pec, TYPE_PNV_PHB4_PEC,
                     PnvPhb4PecState *),
    DEFINE_PROP_LINK("phb-base", PnvPHB4, phb_base, TYPE_PNV_PHB, PnvPHB *),
    DEFINE_PROP_END_OF_LIST(),
};
static void pnv_phb4_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass);

    dc->realize = pnv_phb4_realize;
    device_class_set_props(dc, pnv_phb4_properties);
    dc->user_creatable = false;

    xfc->notify = pnv_phb4_xive_notify;
}
static const TypeInfo pnv_phb4_type_info = {
    .name          = TYPE_PNV_PHB4,
    .parent        = TYPE_DEVICE,
    .instance_init = pnv_phb4_instance_init,
    .instance_size = sizeof(PnvPHB4),
    .class_init    = pnv_phb4_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { },
    }
};

static const TypeInfo pnv_phb5_type_info = {
    .name          = TYPE_PNV_PHB5,
    .parent        = TYPE_PNV_PHB4,
    .instance_size = sizeof(PnvPHB4),
};
static void pnv_phb4_root_bus_get_prop(Object *obj, Visitor *v,
                                       const char *name,
                                       void *opaque, Error **errp)
{
    PnvPHB4RootBus *bus = PNV_PHB4_ROOT_BUS(obj);
    uint64_t value = 0;

    if (strcmp(name, "phb-id") == 0) {
        value = bus->phb_id;
    } else {
        value = bus->chip_id;
    }

    visit_type_size(v, name, &value, errp);
}

static void pnv_phb4_root_bus_set_prop(Object *obj, Visitor *v,
                                       const char *name,
                                       void *opaque, Error **errp)
{
    PnvPHB4RootBus *bus = PNV_PHB4_ROOT_BUS(obj);
    uint64_t value;

    if (!visit_type_size(v, name, &value, errp)) {
        return;
    }

    if (strcmp(name, "phb-id") == 0) {
        bus->phb_id = value;
    } else {
        bus->chip_id = value;
    }
}

static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    object_class_property_add(klass, "phb-id", "int",
                              pnv_phb4_root_bus_get_prop,
                              pnv_phb4_root_bus_set_prop,
                              NULL, NULL);

    object_class_property_add(klass, "chip-id", "int",
                              pnv_phb4_root_bus_get_prop,
                              pnv_phb4_root_bus_set_prop,
                              NULL, NULL);

    /*
     * PHB4 has only a single root complex. Enforce the limit on the
     * parent bus
     */
    k->max_dev = 1;
}
static const TypeInfo pnv_phb4_root_bus_info = {
    .name = TYPE_PNV_PHB4_ROOT_BUS,
    .parent = TYPE_PCIE_BUS,
    .instance_size = sizeof(PnvPHB4RootBus),
    .class_init = pnv_phb4_root_bus_class_init,
};

static void pnv_phb4_register_types(void)
{
    type_register_static(&pnv_phb4_root_bus_info);
    type_register_static(&pnv_phb4_type_info);
    type_register_static(&pnv_phb5_type_info);
    type_register_static(&pnv_phb4_iommu_memory_region_info);
}

type_init(pnv_phb4_register_types);
void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon)
{
    uint64_t notif_port =
        phb->regs[PHB_INT_NOTIFY_ADDR >> 3] & ~PHB_INT_NOTIFY_ADDR_64K;
    uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];
    bool abt = !!(phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_ABT_MODE);

    monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x %s @%"HWADDR_PRIx"\n",
                   phb->chip_id, phb->phb_id,
                   offset, offset + phb->xsrc.nr_irqs - 1,
                   abt ? "ABT" : "",
                   notif_port);
    xive_source_pic_print_info(&phb->xsrc, 0, mon);
}