#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 *
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return dev->dma_mask && *dev->dma_mask != 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t gfp);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size);

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t gfp);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size);

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);

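/*
 * Example (illustrative sketch, not part of this header): hypothetical
 * platform code for a 64MB inbound window, as in the IXP425 case above.
 * The pool sizes passed to dmabounce_register_dev are arbitrary choices
 * for illustration.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t dma, size_t size)
 *	{
 *		return (dma + size) > SZ_64M;
 *	}
 *
 *	(and during platform device setup:)
 *
 *	err = dmabounce_register_dev(dev, 2048, 65536);
 */
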
/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
		enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(page_address(page) + offset, size, dir);

	return page_to_dma(dev, page) + offset;
}

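/*
 * Example (illustrative sketch, not part of this header): mapping only the
 * second half of a freshly allocated page for a device-to-memory transfer.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	dma_addr_t dma = dma_map_page(dev, page, PAGE_SIZE / 2,
 *				      PAGE_SIZE / 2, DMA_FROM_DEVICE);
 *	(let the device fill the buffer, then:)
 *	dma_unmap_page(dev, dma, PAGE_SIZE / 2, DMA_FROM_DEVICE);
 */
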
/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	/* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the PCI DMA
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI DMA address back to the card, you
 * must first perform dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction);

#endif /* __KERNEL__ */
#endif