/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc:
 * the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

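/*
 * A minimal sketch of the "allocate normally" alternative mentioned above
 * (hypothetical driver buffer "buf" of "size" bytes, not an in-tree example):
 *
 *	void *buf = kmalloc(size, GFP_KERNEL);
 *	...CPU fills buf with data destined for the device...
 *	__dma_sync(buf, size, DMA_TO_DEVICE);	(flush before the device reads)
 */
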
#ifdef CONFIG_PPC64

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;

	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
};

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

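/*
 * Usage sketch: a bus setup path picks one of the operation sets declared
 * at the end of this section (dma_direct_ops / dma_iommu_ops) and installs
 * it on the device, e.g.
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *
 * ("pdev" is a hypothetical PCI device.) The generic wrappers below then
 * dispatch through dev->archdata.dma_ops.
 */
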
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

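/*
 * Probe-time sketch ("mydev" is a hypothetical device): refuse devices whose
 * DMA mask cannot be satisfied.
 *
 *	if (dma_set_mask(&mydev->dev, 0xffffffffULL))
 *		return -EIO;
 */
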
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
				   direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

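/*
 * Coherent allocation sketch (RING_BYTES is an assumed driver constant, not
 * defined in this header):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	...program ring_dma into the device; CPU uses "ring" directly...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
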
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

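/*
 * Streaming mapping sketch ("data" and "len" are hypothetical driver state):
 *
 *	dma_addr_t busaddr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(busaddr))
 *		return -EIO;
 *	...hand busaddr to the device and wait for completion...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */
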
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

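/*
 * Scatter/gather sketch ("sgl"/"nents" are a caller-built scatterlist):
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	...program "count" descriptors via sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
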
/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	}

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

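/*
 * Buffer reuse sketch: give a mapped receive buffer to the CPU and then back
 * to the device (hypothetical "busaddr"/"len"):
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	...CPU inspects the received bytes...
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 */
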
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */