#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses.  They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
        return pfn_to_page(__bus_to_pfn(addr));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return __arch_page_to_dma(dev, page);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_page(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif
/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, non-write-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h.
 *
 * Note: Drivers should NOT use these functions directly, as they will
 * break platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
                size_t size, int rw);
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        if (mask < ISA_DMA_THRESHOLD)
                return 0;
        return 1;
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
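/*
 * Editorial illustration: a driver would typically declare its
 * addressing capability once at probe time; DMA_BIT_MASK() comes from
 * linux/dma-mapping.h:
 *
 *      if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 *
 * A failure here means the device cannot address system RAM.
 */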
static inline int dma_get_cache_alignment(void)
{
        return 32;
}
static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
        return !!arch_is_coherent();
}
/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}
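/*
 * Editorial illustration: every streaming mapping should be checked
 * before the handle is handed to hardware:
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 */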
/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
}
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
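/*
 * Editorial illustration: a typical allocate/use/free cycle for a
 * coherent descriptor ring (the ring size is hypothetical):
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
 *                                      GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ... program ring_dma into the device, access ring from the CPU ...
 *      dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */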
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
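/*
 * Editorial illustration: a driver's mmap() file operation can expose a
 * coherent buffer to user space.  foo_priv and its fields are
 * hypothetical state saved when the buffer was allocated:
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_priv *p = file->private_data;
 *
 *              return dma_mmap_coherent(p->dev, vma, p->cpu_addr, p->handle,
 *                                       vma->vm_end - vma->vm_start);
 *      }
 */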
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
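/*
 * Editorial illustration: write-combined memory suits buffers the CPU
 * mostly streams writes into, such as a frame buffer (the fb fields
 * are hypothetical):
 *
 *      fb->screen_base = dma_alloc_writecombine(dev, fb_size,
 *                                               &fb->map_dma, GFP_KERNEL);
 *      if (!fb->screen_base)
 *              return -ENOMEM;
 */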
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */
/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long);
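/*
 * Editorial illustration: platform code might register a device with a
 * pool of 512-byte small buffers and 4K large buffers (the sizes are
 * hypothetical and chosen per-platform):
 *
 *      if (dmabounce_register_dev(dev, 512, 4096))
 *              dev_err(dev, "failed to register with dmabounce\n");
 */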
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);
/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
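/*
 * Editorial illustration: a platform with a 64MB inbound window at the
 * start of RAM might implement the hook as follows (the window
 * geometry is hypothetical):
 *
 *      int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *      {
 *              return (addr + size) > (PHYS_OFFSET + SZ_64M);
 *      }
 */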
/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
                enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);
/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint(cpu_addr, size, dir);

        return virt_to_dma(dev, cpu_addr);
}
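/*
 * Editorial illustration: a transmit path maps the buffer, hands the
 * handle to the hardware, and unmaps on completion (foo_hw_start_tx is
 * hypothetical):
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      foo_hw_start_tx(dev, handle, len);
 *      ... on the completion interrupt ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */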
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint_page(page, offset, size, dir);

        return page_to_dma(dev, page) + offset;
}
/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        /* nothing to do */
}
/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        /* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */
/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so.  At the point you next
 * give the DMA address back to the device, you must first perform a
 * dma_sync_single_range_for_device(), and then the device again owns
 * the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        if (!arch_is_coherent())
                dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
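/*
 * Editorial illustration: reusing one streaming mapping across several
 * transfers by handing ownership back and forth:
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the CPU may now read the data the device wrote ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the device may now DMA into the buffer again ...
 */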
/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
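/*
 * Editorial illustration: mapping a scatterlist and programming each
 * resulting segment (foo_hw_queue_seg is hypothetical; the DMA API
 * allows dma_map_sg() to coalesce entries, so iterate over its return
 * value but unmap with the original nents):
 *
 *      struct scatterlist *sg;
 *      int i, count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 *      for_each_sg(sglist, sg, count, i)
 *              foo_hw_queue_seg(sg_dma_address(sg), sg_dma_len(sg));
 *      ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */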
#endif /* __KERNEL__ */
#endif /* ASMARM_DMA_MAPPING_H */