#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/mm.h>		/* need struct page */

#include <asm/scatterlist.h>
/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 */
extern void consistent_sync(void *kaddr, size_t size, int rw);
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return dev->dma_mask && *dev->dma_mask != 0;
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
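
/*
 * Example usage (illustrative only, not part of this header): a sketch
 * of how a driver probe routine might negotiate its DMA mask.  The
 * foo_probe() name and the 24-bit limit are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		// hypothetical device that drives only 24 address bits
 *		if (dma_set_mask(dev, 0x00ffffff))
 *			return -EIO;
 *		// mask accepted; DMA buffers may be set up from here on
 *		return 0;
 *	}
 */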
static inline int dma_get_cache_alignment(void)
{
	return 32;
}
static inline int dma_is_consistent(dma_addr_t handle)
{
	return 0;
}
/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
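
/*
 * Example usage (illustrative only): a streaming mapping should be
 * checked with dma_mapping_error() before the handle is programmed
 * into the device.  The buf/len variables are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;		// do not hand the handle to the device
 */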
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
		   gfp_t gfp);
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle);
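
/*
 * Example usage (illustrative only): a typical allocate/use/free cycle
 * for a coherent buffer, here a hypothetical descriptor ring of one
 * page.  No cache maintenance calls are needed for coherent memory.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// CPU accesses go through 'ring'; the device is programmed
 *	// with the bus address 'ring_dma'
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */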
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size);
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle,
		       gfp_t gfp);
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)
int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t handle, size_t size);
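
/*
 * Example usage (illustrative only): exporting a writecombining DMA
 * buffer to user space from a driver's mmap file operation.  The
 * foo_dev structure and its fields are hypothetical; the buffer must
 * not be freed while the user mapping exists.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_writecombine(fd->dev, vma, fd->cpu_addr,
 *					     fd->dma_handle, fd->size);
 *	}
 */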
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	consistent_sync(cpu_addr, size, dir);
	return virt_to_dma(dev, (unsigned long)cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
#endif
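
/*
 * Example usage (illustrative only): the basic streaming lifecycle for
 * a hypothetical transmit buffer.  Between map and unmap the device
 * owns the memory and the CPU must not touch it.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
 *	// program the device with 'handle' and start the transfer;
 *	// after the completion interrupt:
 *	dma_unmap_single(dev, handle, skb->len, DMA_TO_DEVICE);
 */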
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
}
/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
		 enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif
/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, (int)dir);
}
/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt;

		sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
		virt = page_address(sg->page) + sg->offset;
		consistent_sync(virt, sg->length, dir);
	}

	return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
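
/*
 * Example usage (illustrative only): mapping a scatterlist and walking
 * the resulting address/length pairs.  Note the loop bound is the
 * return value of dma_map_sg(), not the original nents;
 * foo_load_descriptor() is a hypothetical device-specific helper.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	for (i = 0; i < count; i++)
 *		foo_load_descriptor(dev, sg_dma_address(&sglist[i]),
 *				    sg_dma_len(&sglist[i]));
 */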
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
/**
 * dma_sync_single_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so.  At the
 * next point you give the DMA address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}
#else
extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif
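
/*
 * Example usage (illustrative only): inspecting a mapped receive
 * buffer without tearing down the mapping.  Ownership passes to the
 * CPU for the inspection and back to the device afterwards;
 * foo_check_header() is a hypothetical parser.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	// the CPU may now safely read the buffer
 *	foo_check_header(buf);
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	// the device owns the buffer again and may resume DMA
 */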
/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = page_address(sg->page) + sg->offset;
		consistent_sync(virt, sg->length, dir);
	}
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = page_address(sg->page) + sg->offset;
		consistent_sync(virt, sg->length, dir);
	}
}
#else
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 */
/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev() is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);
/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
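
/*
 * Example (illustrative only): a sketch of a platform implementation
 * for a machine whose inbound PCI window covers only the first 64MB
 * of RAM.  The window bounds shown are hypothetical.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr,
 *			     size_t size)
 *	{
 *		// bounce any buffer extending beyond the 64MB window
 *		return (dma_addr + size) > (PHYS_OFFSET + SZ_64M);
 *	}
 */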
#endif /* CONFIG_DMABOUNCE */
#endif /* __KERNEL__ */
#endif