[nao-ulib.git] / kernel / 2.6.29.6-aldebaran-rt / arch / arm / include / asm / dma-mapping.h

#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, non-write-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h.
 *
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support instead - see dma-mapping.h (dma_sync_*).
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        if (mask < ISA_DMA_THRESHOLD)
                return 0;
        return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
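
/*
 * Illustrative sketch (not from the original file): a driver probe path
 * typically negotiates its DMA mask before creating any mappings.
 * foo_probe is a hypothetical driver function; dma_set_mask() and
 * DMA_BIT_MASK() are the real interfaces.
 */
#if 0
static int foo_probe(struct device *dev)
{
        /* device can only drive the low 24 address bits (0x00ffffff) */
        if (dma_set_mask(dev, DMA_BIT_MASK(24)))
                return -EIO;
        return 0;
}
#endif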

static inline int dma_get_cache_alignment(void)
{
        return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
        return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
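
/*
 * Illustrative sketch (not from the original file): a coherent buffer,
 * such as a descriptor ring, visible to both CPU and device without
 * explicit cache maintenance.  foo_setup_ring is hypothetical.
 */
#if 0
static int foo_setup_ring(struct device *dev)
{
        void *ring;
        dma_addr_t ring_dma;

        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* hand ring_dma to the device; use "ring" from the CPU side */

        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        return 0;
}
#endif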

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
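
/*
 * Illustrative sketch (not from the original file): exporting a coherent
 * buffer through a driver's mmap file operation.  foo_mmap, foo_dev,
 * foo_buf, foo_buf_dma and FOO_BUF_SIZE are hypothetical driver state.
 */
#if 0
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (size > FOO_BUF_SIZE)
                return -EINVAL;
        return dma_mmap_coherent(foo_dev, vma, foo_buf, foo_buf_dma, size);
}
#endif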

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
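
/*
 * Illustrative sketch (not from the original file): write-combined memory
 * suits CPU-written, device-read streams such as a frame buffer, since
 * writes may be buffered while the mapping stays uncached.  foo_alloc_fb
 * is hypothetical.
 */
#if 0
static void *foo_alloc_fb(struct device *dev, size_t len, dma_addr_t *fb_dma)
{
        return dma_alloc_writecombine(dev, len, fb_dma, GFP_KERNEL);
}
#endif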

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long);
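
/*
 * Illustrative sketch (not from the original file): platform setup
 * registering a device for bouncing, with a 2KB small-buffer pool and a
 * 4KB large-buffer pool.  foo_platform_init is hypothetical.
 */
#if 0
static int foo_platform_init(struct device *dev)
{
        return dmabounce_register_dev(dev, 2048, 4096);
}
#endif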

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
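
/*
 * Illustrative sketch (not from the original file): a platform
 * implementation that bounces any buffer extending past a 64MB inbound
 * window, in the spirit of the IXP425 case above.  FOO_WINDOW_SIZE is
 * hypothetical.
 */
#if 0
#define FOO_WINDOW_SIZE SZ_64M

int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
        /* 0: buffer is DMA-able as-is; 1: must be bounced */
        return (addr + size) > FOO_WINDOW_SIZE;
}
#endif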

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
                enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint(cpu_addr, size, dir);

        return virt_to_dma(dev, cpu_addr);
}
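
/*
 * Illustrative sketch (not from the original file): streaming DMA over a
 * kmalloc'd buffer.  The device owns the buffer between map and unmap;
 * foo_tx and foo_start_dma are hypothetical.
 */
#if 0
static int foo_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -ENOMEM;
        foo_start_dma(dma, len);        /* hypothetical device kick-off */
        /* ... wait for completion ... */
        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
}
#endif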

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint(page_address(page) + offset, size, dir);

        return page_to_dma(dev, page) + offset;
}
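
/*
 * Illustrative sketch (not from the original file): mapping part of a
 * freshly allocated page for device writes, as a network driver might do
 * for an RX ring fragment.  foo_map_rx_frag and FOO_FRAG_SIZE are
 * hypothetical.
 */
#if 0
static dma_addr_t foo_map_rx_frag(struct device *dev, struct page *pg)
{
        return dma_map_page(dev, pg, 0, FOO_FRAG_SIZE, DMA_FROM_DEVICE);
}
#endif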

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        /* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        if (!arch_is_coherent())
                dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
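
/*
 * Illustrative sketch (not from the original file): the CPU inspecting a
 * long-lived streaming mapping between transfers without unmapping it.
 * foo_poll and foo_check_status are hypothetical.
 */
#if 0
static void foo_poll(struct device *dev, dma_addr_t dma, void *buf, size_t len)
{
        /* take ownership back so CPU reads see what the device wrote */
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
        foo_check_status(buf);          /* hypothetical */
        /* hand the buffer back to the device for the next transfer */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}
#endif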

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
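
/*
 * Illustrative sketch (not from the original file): mapping a two-entry
 * scatterlist.  dma_map_sg() may coalesce entries, so the returned count,
 * not the original nents, bounds the walk.  foo_map_two, foo_a and foo_b
 * are hypothetical.
 */
#if 0
static int foo_map_two(struct device *dev, void *foo_a, void *foo_b)
{
        struct scatterlist sg[2];
        int i, count;

        sg_init_table(sg, 2);
        sg_set_buf(&sg[0], foo_a, PAGE_SIZE);
        sg_set_buf(&sg[1], foo_b, PAGE_SIZE);

        count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
        if (!count)
                return -ENOMEM;
        for (i = 0; i < count; i++) {
                /* hand sg_dma_address(&sg[i]) / sg_dma_len(&sg[i]) to hw */
        }

        dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
        return 0;
}
#endif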

#endif /* __KERNEL__ */
#endif