#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
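/*
 * bad_dma_address is the sentinel bus address returned on mapping failure;
 * dma_mapping_error() below compares against it when an implementation
 * provides no mapping_error hook.
 */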
struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
				dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
};
extern struct dma_mapping_ops *dma_ops;
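/*
 * The global dma_ops is the architecture-selected default implementation;
 * a device may carry its own ops in dev->archdata.dma_ops, which
 * get_dma_ops() prefers when present.
 */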
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
}
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}
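/*
 * Illustrative streaming-DMA usage from a driver; "mydev", "buf" and "len"
 * are hypothetical names, not part of this interface:
 *
 *	dma_addr_t bus = dma_map_single(&mydev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&mydev->dev, bus))
 *		return -ENOMEM;
 *	... let the hardware DMA from "bus" ...
 *	dma_unmap_single(&mydev->dev, bus, len, DMA_TO_DEVICE);
 */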
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
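/*
 * The dma_sync_* helpers transfer ownership of an already-mapped buffer
 * back to the CPU or to the device. Each hook is optional in
 * struct dma_mapping_ops; when it is absent only the CPU write buffers
 * are flushed.
 */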
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}
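/*
 * Page-based mappings are expressed through the same map_single hook,
 * using the page's physical address plus the in-page offset.
 */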
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}
static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

	return gfp;
}
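/*
 * Example of the mask-to-GFP mapping above (illustrative): a device whose
 * coherent_dma_mask is DMA_24BIT_MASK gets GFP_DMA so the buffer lands in
 * the ISA-reachable low 16MB, while a DMA_32BIT_MASK device gets GFP_DMA32
 * to stay below 4GB.
 */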
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}
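/*
 * Coherent-memory sketch (illustrative; "pdev" and RING_BYTES are
 * hypothetical names):
 *
 *	dma_addr_t bus;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &bus, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, bus);
 */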
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */