/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc
 * (the pci and vio busses).
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
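
/*
 * Illustrative sketch (not part of this header): as the comment above says,
 * a driver on a non-snooping core could also allocate memory "normally" and
 * keep it consistent by hand with __dma_sync().  The buffer, length and
 * fill helper below are hypothetical.
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	if (buf) {
 *		fill_buffer_for_device(buf, len);
 *		__dma_sync(buf, len, DMA_TO_DEVICE); // flush before device reads
 *	}
 */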
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */
#ifdef CONFIG_PPC64
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *dev, void *ptr,
			size_t size, enum dma_data_direction direction);
	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
			size_t size, enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*dac_dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 dma_mask);
};
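
/*
 * Illustrative sketch (not part of this header): a bus backend fills in one
 * of these structures and hangs it off the device.  The callback names here
 * are hypothetical; the real generic instances (dma_iommu_ops,
 * dma_direct_ops) are declared further down.
 *
 *	static struct dma_mapping_ops my_bus_dma_ops = {
 *		.alloc_coherent	= my_bus_alloc_coherent,
 *		.free_coherent	= my_bus_free_coherent,
 *		.map_single	= my_bus_map_single,
 *		.unmap_single	= my_bus_unmap_single,
 *		.map_sg		= my_bus_map_sg,
 *		.unmap_sg	= my_bus_unmap_sg,
 *	};
 *
 *	dev->archdata.dma_ops = &my_bus_dma_ops;
 */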
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, *dev->dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
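
/*
 * Illustrative usage (not part of this header): a driver probe routine
 * typically negotiates its mask before doing any mapping; the 32-bit mask
 * below is just an example value.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;	// device unusable on this bus
 */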
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
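
/*
 * Illustrative usage (not part of this header): a coherent buffer shared
 * with the device, e.g. for ring descriptors; RING_BYTES is hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	// ...hand ring_dma to the device, access ring from the CPU...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */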
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction);
}
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction);
}
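
/*
 * Illustrative usage (not part of this header): streaming DMA for a single
 * transmit buffer; skb here stands in for any driver-owned buffer.
 *
 *	dma_addr_t mapping = dma_map_single(&pdev->dev, skb->data,
 *					    skb->len, DMA_TO_DEVICE);
 *	// ...point the device at 'mapping' and wait for completion...
 *	dma_unmap_single(&pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
 */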
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
			direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction);
}
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction);
}
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

extern unsigned long dma_direct_offset;
#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}
static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

/* We do nothing. */
#define dma_unmap_single(dev, addr, size, dir)	((void)0)
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

/* We do nothing. */
#define dma_unmap_page(dev, handle, size, dir)	((void)0)
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg->page);
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)	((void)0)
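
/*
 * Illustrative usage (not part of this header): mapping a scatterlist built
 * by a block driver; 'sglist' and 'count' are hypothetical.
 *
 *	int n = dma_map_sg(&pdev->dev, sglist, count, DMA_FROM_DEVICE);
 *	// ...program each of the n entries (sg_dma_address/sg_dma_len)...
 *	dma_unmap_sg(&pdev->dev, sglist, count, DMA_FROM_DEVICE);
 */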
#endif /* CONFIG_PPC64 */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif
static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
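
/*
 * Illustrative usage (not part of this header): rounding a streaming buffer
 * up to the cache alignment so unrelated data never shares a cache line
 * with DMA'd bytes; payload_len is hypothetical.
 *
 *	size_t len = ALIGN(payload_len, dma_get_cache_alignment());
 *	void *buf = kmalloc(len, GFP_KERNEL);
 */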
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}
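
/*
 * Illustrative usage (not part of this header): pairing the noncoherent
 * allocator above with dma_cache_sync() before handing data to the device;
 * len and handle are hypothetical.
 *
 *	void *buf = dma_alloc_noncoherent(&pdev->dev, len, &handle, GFP_KERNEL);
 *	// ...CPU fills buf...
 *	dma_cache_sync(&pdev->dev, buf, len, DMA_TO_DEVICE);
 */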
#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */