#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/config.h>

#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

/* DMA operations provided by the active implementation (nommu, GART IOMMU or swiotlb). */
struct dma_mapping_ops {
	int		(*mapping_error)(dma_addr_t dma_addr);
	void*		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, void *ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, char *ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
};

extern dma_addr_t bad_dma_address;
extern struct dma_mapping_ops* dma_ops;
extern int iommu_merge;

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}

extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle);

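/*
 * Illustrative sketch (not part of the original header): a driver would
 * typically obtain a coherent buffer like this; "dev" and "MY_RING_BYTES"
 * are the driver's own device and buffer size, not names defined here:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, MY_RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the hardware, access ring from the CPU ...
 *	dma_free_coherent(dev, MY_RING_BYTES, ring, ring_dma);
 */
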
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	dma_ops->unmap_single(dev, addr, size, direction);
}

#define dma_map_page(dev,page,offset,size,dir) \
	dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

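/*
 * Illustrative sketch (not part of the original header): the usual
 * streaming-mapping pattern for a single buffer, assuming "dev", "buf"
 * and "len" belong to the calling driver:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	... start the DMA transfer using "bus" ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */
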
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}

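/*
 * Illustrative sketch (not part of the original header): ping-ponging a
 * long-lived streaming mapping between device and CPU ownership, assuming
 * "dev", "bus" and "len" are the driver's own:
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read or modify the buffer ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	... the device may DMA into the buffer again ...
 */
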
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	if (dma_ops->sync_single_range_for_cpu) {
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);
	}

	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	if (dma_ops->sync_sg_for_device) {
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	}

	flush_write_buffers();
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   int direction)
{
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

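/*
 * Illustrative sketch (not part of the original header): mapping a
 * scatterlist and programming the hardware with the returned entries.
 * "dev", "sglist", "nents" and my_hw_fill_desc() are hypothetical
 * driver-side names; note dma_map_sg() may return fewer entries than
 * nents (e.g. when the IOMMU merges them), while dma_unmap_sg() must be
 * called with the original nents:
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for (i = 0; i < count; i++)
 *		my_hw_fill_desc(dev, sg_dma_address(&sglist[i]),
 *				sg_dma_len(&sglist[i]));
 *	... after the transfer completes ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */
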
extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

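/*
 * Illustrative sketch (not part of the original header): a device that can
 * only address the low 32 bits would typically restrict its mask at probe
 * time; "dev" is the driver's own device:
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))
 *		return -EIO;
 *
 * If dma_set_mask() fails, the driver must fall back to a smaller mask or
 * refuse to use DMA on this device.
 */
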
static inline void
dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
{
	flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */