/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

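/* To keep the number of pci_sun4v_iommu_map() hypervisor calls down,
 * mappings are staged in a per-cpu batch: physical page addresses
 * accumulate in pglist, and a whole run of IOTSB entries is handed to
 * the hypervisor at once when the list fills or the batch is ended.
 */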
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		/* The hypervisor may map fewer pages than requested, so
		 * advance by the count it reports and retry the rest.
		 */
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

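/* Bitmap allocator for a contiguous range of IOTSB entries.  The scan
 * starts at the rotating hint left behind by the previous allocation
 * and is allowed to wrap around to the beginning once before giving up.
 */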
static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void arena_free(struct iommu_arena *arena, unsigned long base,
		       unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

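/* Coherent DMA: allocate real pages, reserve a contiguous run of IOTSB
 * entries under the arena lock, then point each entry at one of the
 * pages through the per-cpu mapping batch.
 */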
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

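/* The free path mirrors the allocation: the arena bits are released
 * first, then the IOTSB entries are demapped in chunks, since the
 * hypervisor reports how many entries each call actually invalidated.
 */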
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

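/* Tearing down a streaming mapping also demaps in chunks, advancing by
 * the per-call count returned from pci_sun4v_iommu_demap() until the
 * whole range is gone.
 */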
static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

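/* All segments of a scatterlist share one contiguous IOTSB range, so
 * each sg entry's dma_address is just an offset into the window that
 * starts at dma_base.
 */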
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	unsigned long flags, npages, i, prot;
	struct scatterlist *sg;
	struct iommu *iommu;
	long entry, err;
	u32 dma_base;

	/* Fast path single entry scatterlists.  */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4v_map_single(dev, sg_virt(sglist),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	npages = calc_npages(sglist, nelems);

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for_each_sg(sglist, sg, nelems, i) {
		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
		unsigned long slen = sg->length;
		unsigned long this_npages;

		this_npages = iommu_num_pages(paddr, slen);

		sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
		sg->dma_length = slen;

		paddr &= IO_PAGE_MASK;
		while (this_npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L)) {
				local_irq_restore(flags);
				goto iommu_map_failed;
			}

			paddr += IO_PAGE_SIZE;
			dma_base += IO_PAGE_SIZE;
		}
	}

	err = iommu_batch_end();

	local_irq_restore(flags);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return nelems;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, npages;
	struct pci_pbm_info *pbm;
	u32 devhandle, bus_addr;
	struct iommu *iommu;
	long entry;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	npages = calc_npages(sglist, nelems);

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}

const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_single			= dma_4v_map_single,
	.unmap_single			= dma_4v_unmap_single,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};

static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}

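/* Scan the IOTSB for translations left behind by OBP.  Entries that
 * point at memory the kernel will manage are torn down; the rest are
 * marked busy in the arena so they survive, since the firmware may
 * still be using them (e.g. for the console).
 */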
static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
						   struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static void __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTX only */
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

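/* All event queues live in one physically contiguous allocation (see
 * pci_sun4v_msiq_alloc below), and the head values exchanged with the
 * hypervisor are byte offsets into a queue, not entry indices.
 */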
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

*pbm
, unsigned long msiqid
,
723 err
= pci_sun4v_msiq_sethead(pbm
->devhandle
, msiqid
, head
);
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		/* Read the configuration back and verify the hypervisor
		 * accepted the base address and entry count we gave it.
		 */
		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

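/* Bind an event queue to a Linux virtual IRQ: build the sun4v IRQ
 * cookie for the queue's devino, then mark the queue idle and valid so
 * it can start delivering interrupts.
 */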
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static void __init pci_sun4v_pbm_init(struct pci_controller_info *p,
				      struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}

906 void __init
sun4v_pci_init(struct device_node
*dp
, char *model_name
)
908 static int hvapi_negotiated
= 0;
909 struct pci_controller_info
*p
;
910 struct pci_pbm_info
*pbm
;
912 struct property
*prop
;
913 struct linux_prom64_registers
*regs
;
917 if (!hvapi_negotiated
++) {
918 int err
= sun4v_hvapi_register(HV_GRP_PCI
,
923 prom_printf("SUN4V_PCI: Could not register hvapi, "
927 printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
928 vpci_major
, vpci_minor
);
930 dma_ops
= &sun4v_dma_ops
;
933 prop
= of_find_property(dp
, "reg", NULL
);
935 prom_printf("SUN4V_PCI: Could not find config registers\n");
940 devhandle
= (regs
->phys_addr
>> 32UL) & 0x0fffffff;
942 for (pbm
= pci_pbm_root
; pbm
; pbm
= pbm
->next
) {
943 if (pbm
->devhandle
== (devhandle
^ 0x40)) {
944 pci_sun4v_pbm_init(pbm
->parent
, dp
, devhandle
);
949 for_each_possible_cpu(i
) {
950 unsigned long page
= get_zeroed_page(GFP_ATOMIC
);
953 goto fatal_memory_error
;
955 per_cpu(iommu_batch
, i
).pglist
= (u64
*) page
;
958 p
= kzalloc(sizeof(struct pci_controller_info
), GFP_ATOMIC
);
960 goto fatal_memory_error
;
962 iommu
= kzalloc(sizeof(struct iommu
), GFP_ATOMIC
);
964 goto fatal_memory_error
;
966 p
->pbm_A
.iommu
= iommu
;
968 iommu
= kzalloc(sizeof(struct iommu
), GFP_ATOMIC
);
970 goto fatal_memory_error
;
972 p
->pbm_B
.iommu
= iommu
;
974 pci_sun4v_pbm_init(p
, dp
, devhandle
);
978 prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");