/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"
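/* The hypervisor's IOMMU map call takes the physical address of a
 * list of page addresses, so each CPU keeps one page of scratch
 * space (PGLIST_NENTS entries) in which to build that list.  This
 * also bounds how many pages a single mapping request may cover.
 */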
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct sun4v_pglist {
	u64	*pglist;
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);
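/* First-fit allocator over the IOMMU TSB bitmap.  The search begins
 * at the rotating hint; if it runs off the end of the map, one more
 * pass rescans from the bottom before giving up.
 */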
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
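/* Consistent DMA allocation: get zeroed pages, reserve IOMMU entries
 * under the lock, then program the translations through the
 * hypervisor outside of it.  A single hypervisor call may map fewer
 * pages than requested, hence the retry loop below.
 */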
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;
	u64 *pglist;
	u32 devhandle;
	int cpu;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return NULL;

	npages = size >> IO_PAGE_SHIFT;
	if (npages > PGLIST_NENTS)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	devhandle = pcp->pbm->devhandle;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	cpu = get_cpu();

	pglist = __get_cpu_var(iommu_pglists).pglist;
	for (n = 0; n < npages; n++)
		pglist[n] = first_page + (n * PAGE_SIZE);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages,
					  (HV_PCI_MAP_ATTR_READ |
					   HV_PCI_MAP_ATTR_WRITE),
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;
}
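/* Teardown is the mirror image: release the arena entries and demap
 * through the hypervisor, which again may only process part of the
 * range per call.
 */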
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
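/* Map a single streaming buffer.  The returned bus address preserves
 * the byte offset of the original pointer within its IO page; write
 * permission is granted unless the transfer is to-device only.
 */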
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 devhandle, bus_addr, ret;
	unsigned long prot;
	long entry;
	u64 *pglist;
	int cpu;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	cpu = get_cpu();

	pglist = __get_cpu_var(iommu_pglists).pglist;
	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
		pglist[i] = base_paddr;

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot,
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;
}
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
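/* fill_sg() walks a scatterlist that prepare_sg() has already
 * coalesced into "nused" DMA segments ("nelems" is the original
 * entry count), emits one page-list entry per IO page touched, and
 * then hands the whole list to the hypervisor in batches.
 */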
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(long entry, u32 devhandle,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i, cpu, pglist_ent;
	u64 *pglist;

	cpu = get_cpu();
	pglist = __get_cpu_var(iommu_pglists).pglist;
	pglist_ent = 0;
	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				pglist[pglist_ent++] = pteval;
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	BUG_ON(pglist_ent == 0);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  pglist_ent, prot,
					  __pa(pglist));
		entry += num;
		pglist += num;
		pglist_ent -= num;
	} while (pglist_ent != 0);

	put_cpu();
}
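/* Single-entry scatterlists take the map_single() fast path below;
 * longer lists go through prepare_sg() and fill_sg() above.
 */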
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 devhandle, dma_base;
	struct scatterlist *sgtmp;
	long entry;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	fill_sg(entry, devhandle, sglist, used, nelems, prot);

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
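/* The sun4v IOMMU is managed entirely by the hypervisor and there is
 * no streaming cache for us to flush, so the sync operations below
 * have nothing to do.
 */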
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}
struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
/* SUN4V PCI configuration space accessors. */
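/* Config accesses outside the bus range this PBM decodes are failed
 * up front; on the PBM's own bus only the host bridge at device 0,
 * function 0 is expected to respond.
 */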
static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus == pbm->pci_first_busno) {
		if (device == 0 && func == 0)
			return 0;
		return 1;
	}

	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return 0;
}
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}
static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};
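/* Probe one PBM's bus and run the common sparc64 fixups: cookie
 * setup, resource assignment recording, IRQ fixup, 66MHz disposition
 * and bus mastering setup.
 */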
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);

	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;

	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
				pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}
static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}
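/* The interrupt PIL is chosen by PCI base class.  The specific
 * values below follow the convention of the other sparc64 PCI
 * controller drivers (PSYCHO/SCHIZO), which this driver is assumed
 * to mirror.
 */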
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;
	int pil;

	pil = 5;
	if (pdev) {
		switch ((pdev->class >> 16) & 0xff) {
		case PCI_BASE_CLASS_STORAGE:
			pil = 5;
			break;

		case PCI_BASE_CLASS_NETWORK:
			pil = 6;
			break;

		case PCI_BASE_CLASS_DISPLAY:
			pil = 9;
			break;

		case PCI_BASE_CLASS_MULTIMEDIA:
		case PCI_BASE_CLASS_MEMORY:
		case PCI_BASE_CLASS_BRIDGE:
		case PCI_BASE_CLASS_SERIAL:
			pil = 10;
			break;

		default:
			pil = 5;
			break;
		};
	}
	BUG_ON(PIL_RESERVED(pil));

	return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
}
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not %100 correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}
/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo  <<  0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
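/* Translations may already be live in the TSB when we take over,
 * presumably established by the firmware for devices it was driving.
 * Mark them in the arena map so we never hand those entries out.
 */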
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			cnt++;
			__set_bit(i, arena->map);
		}
	}

	return cnt;
}
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
		case 0x20000000:
			dma_mask |= 0x1fffffff;
			tsbsize = 64;
			break;

		case 0x40000000:
			dma_mask |= 0x3fffffff;
			tsbsize = 128;
			break;

		case 0x80000000:
			dma_mask |= 0x7fffffff;
			tsbsize = 128;
			break;

		default:
			prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
			prom_halt();
	};

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);

	printk("%s: TSB entries [%lu], existing mappings [%lu]\n",
	       pbm->name, num_tsb_entries, sz);
}
static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	unsigned int busrange[2];
	int prom_node = pbm->prom_node;
	int err;

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x] prom_node[%x:%x]\n",
	       pbm->name, pbm->devhandle,
	       pbm->prom_node, prom_getchild(pbm->prom_node));

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
	err = prom_getproperty(prom_node, "interrupt-map-mask",
			       (char *)&pbm->pbm_intmask,
			       sizeof(pbm->pbm_intmask));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
			    pbm->name);
		prom_halt();
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);
}
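/* Entry point from the sparc64 PCI probe code.  The device handle
 * comes from the upper bits of the first "reg" cell; the two PBMs of
 * a controller pair up as A/B, distinguished by bit 0x40 of that
 * handle.
 */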
void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	u32 devhandle;
	int i;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_pglists, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;
	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}