#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

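/*
 * Note: the global dma_ops pointer is installed at boot by whichever DMA
 * backend is active (nommu, swiotlb, or a hardware IOMMU driver);
 * get_dma_ops() below prefers a per-device override in dev->archdata when
 * one has been set.
 */
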
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}

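/*
 * Usage sketch (hypothetical driver code, not part of this header): every
 * streaming mapping should be checked with dma_mapping_error() before the
 * returned bus address is handed to hardware.  "pdev", "buf" and "len" are
 * placeholder names.
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(&pdev->dev, handle))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */
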
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

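/*
 * Usage sketch (illustrative only): a driver normally negotiates its
 * addressing capability once at probe time, before it creates any mappings;
 * a non-zero return means the device cannot reach the requested range.
 * "mydev" is a placeholder struct device pointer.
 *
 *      if (dma_set_mask(mydev, DMA_32BIT_MASK))
 *              return -EIO;
 */
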
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, NULL);
        debug_dma_map_page(hwdev, virt_to_page(ptr),
                           (unsigned long)ptr & ~PAGE_MASK, size,
                           dir, addr, true);

        return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        int ents;
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
        debug_dma_map_sg(hwdev, sg, nents, ents, dir);

        return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(hwdev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}

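/*
 * Usage sketch (hypothetical, for illustration): mapping a short
 * scatterlist.  dma_map_sg() returns the number of DMA segments actually
 * used (an IOMMU may coalesce entries) and 0 on failure; the unmap call
 * must pass the original nents.  "dev", "pages" and NPAGES are placeholders.
 *
 *      struct scatterlist sgl[NPAGES];
 *      int i, count;
 *
 *      sg_init_table(sgl, NPAGES);
 *      for (i = 0; i < NPAGES; i++)
 *              sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);
 *
 *      count = dma_map_sg(dev, sgl, NPAGES, DMA_FROM_DEVICE);
 *      if (!count)
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_sg(dev, sgl, NPAGES, DMA_FROM_DEVICE);
 */
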
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}

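/*
 * Usage sketch (illustrative): a buffer that stays mapped across several
 * transfers is handed back and forth with the sync calls, so that CPU
 * accesses are ordered against device DMA.  "dev", "handle", "buf", "len"
 * and process_rx_data() are placeholders.
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      process_rx_data(buf, len);
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
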
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, dir);
        debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
                                            offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, dir);
        debug_dma_sync_single_range_for_device(hwdev, dma_handle,
                                               offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

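/*
 * Usage sketch (illustrative): dma_map_page() is the page-based variant,
 * used when the driver holds a struct page rather than a kernel virtual
 * address.  "dev", "page", "offset" and "len" are placeholders.
 *
 *      dma_addr_t bus = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, bus))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_page(dev, bus, len, DMA_TO_DEVICE);
 */
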
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_24BIT_MASK)
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        memory = ops->alloc_coherent(dev, size, dma_handle,
                                     dma_alloc_coherent_gfp_flags(dev, gfp));
        debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

        return memory;
}

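/*
 * Usage sketch (illustrative): coherent memory is typically allocated once,
 * e.g. for a descriptor ring, and both the CPU and bus addresses remain
 * valid until dma_free_coherent().  "dev", "ring", "ring_bus" and
 * RING_BYTES are placeholders.
 *
 *      dma_addr_t ring_bus;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, RING_BYTES, &ring_bus, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, RING_BYTES, ring, ring_bus);
 */
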
static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        debug_dma_free_coherent(dev, size, vaddr, bus);
        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}

#endif