#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;
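
/*
 * Hooks implemented by the active DMA backend (e.g. nommu, swiotlb, or a
 * hardware IOMMU driver).  The inline wrappers below dispatch through
 * these; hooks a backend leaves NULL are simply skipped.
 */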
struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
				dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
};
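
/* Global fallback operations, used when a device has no per-device ops */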
extern struct dma_mapping_ops *dma_ops;
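
/*
 * Prefer the per-device ops hung off dev->archdata; fall back to the
 * global dma_ops when none are set (or when no device is given).
 */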
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;

	return dev->archdata.dma_ops;
}
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}
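
/*
 * x86 DMA is cache-coherent, so the "noncoherent" allocators are just
 * aliases for the coherent ones.
 */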
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
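
/* Query and set the DMA addressing capabilities of a device */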
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
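
/*
 * Streaming mapping wrappers.  Each one validates the transfer direction
 * and dispatches to the active backend.  Typical driver usage (sketch;
 * "dev" is whatever struct device the driver owns):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		goto fail;
 *	... hand "bus" to the hardware, wait for completion ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */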
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}
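
/*
 * Scatter-gather variants.  dma_map_sg() returns the number of DMA
 * segments actually used (an IOMMU may merge entries) or 0 on failure.
 */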
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
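
/*
 * Ownership transfer for streaming mappings: sync_*_for_cpu before the
 * CPU reads a buffer the device wrote, sync_*_for_device before handing
 * the buffer back to the device.  Missing hooks are no-ops apart from
 * the write-buffer flush.
 */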
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
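
/* Page-based mapping: just map_single() on the page's physical address */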
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}
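
/*
 * dma_cache_sync() has nothing to invalidate on cache-coherent x86;
 * flushing the CPU write buffers is sufficient.
 */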
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}
static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}
#define dma_is_consistent(d, h)	(1)
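
/*
 * Optional per-device coherent memory pool (32-bit only): a driver can
 * declare a bus-addressable region from which its coherent allocations
 * are carved, tracked by the bitmap below.
 */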
#ifdef CONFIG_X86_32
# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
#endif /* CONFIG_X86_32 */

#endif /* _ASM_DMA_MAPPING_H_ */