#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/* Horrible hack. For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop) */
	dma_sg--;
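	/* This only works because the first entry produced by
	 * iommu_coalesce_chunks() always has PIDE_FLAG set, so dma_sg
	 * is advanced again before it is ever dereferenced; the
	 * BUG_ON(pdirp == NULL) below catches any violation. */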

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %08lx/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_dma_len(startsg),
			   sg_virt_addr(startsg), startsg->length);

		/*
		** Look for the start of a new DMA stream
		*/
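		/* The coalescing pass marks the head of each chunk by
		 * setting PIDE_FLAG in sg_dma_address(); the remaining
		 * bits hold the pdir index (<< IOVP_SHIFT) plus the byte
		 * offset of the stream head within its first IO page. */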
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = sg_virt_addr(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
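		/* Write one pdir entry per IOVP_SIZE page spanned by this
		 * entry; size includes dma_offset on the first entry of a
		 * stream, so a mapping that starts mid-page still gets an
		 * entry for its final partial page. */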
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/
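
/*
** Sketch of a typical caller, with driver_alloc_range and
** driver_io_pdir_entry as illustrative names for the per-driver
** callbacks (see ccio-dma.c and sba_iommu.c for the real ones):
**
**	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
**					  driver_alloc_range);
**	filled = iommu_fill_pdir(ioc, sglist, nents, hint,
**				 driver_io_pdir_entry);
**	BUG_ON(coalesced != filled);
*/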

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		struct scatterlist *startsg, int nents,
		int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = sg_virt_addr(startsg) & ~IOVP_MASK;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;
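
		/* dma_offset is the byte offset of the stream head within
		 * its first IO page; it is folded back into the dma
		 * address when the stream is terminated below. */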

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prevstartsg_end, startsg_end;

			prevstartsg_end = sg_virt_addr(startsg) +
				startsg->length;

			startsg++;
			startsg_end = sg_virt_addr(startsg) +
				startsg->length;

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length,
					   IOVP_SIZE) > DMA_CHUNK_SIZE))
				break;
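
			/* Worked example (IOVP_SIZE and DMA_CHUNK_SIZE are
			 * platform-defined; 4 KB and 2 MB are illustrative):
			 * dma_len 0x1800 + dma_offset 0x200 + length 0x1000
			 * = 0x2a00, and ALIGN(0x2a00, 0x1000) = 0x3000 is
			 * well under the limit, so this check passes. */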

			if (startsg->length + dma_len > max_seg_size)
				break;

			/*
			** Next see if we can append the next chunk (i.e.
			** it must end on one page and begin on another)
			*/
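			/* Only page alignment of both addresses is checked,
			 * not virtual adjacency: the pdir maps whole IO
			 * pages, so two page-aligned chunks can share one
			 * DMA stream even if their virtual addresses are
			 * not contiguous. */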
			if (unlikely(((prevstartsg_end | sg_virt_addr(startsg)) & ~PAGE_MASK) != 0))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}