#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}
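
/*
 * Example sketch (not part of this file's API): a streaming map of a
 * driver-owned buffer with an explicit attrs set. "mydev", "buf" and
 * "len" are hypothetical driver state; dma_mapping_error() comes from
 * the arch's <asm/dma-mapping.h>. The choice of DMA_ATTR_SKIP_CPU_SYNC
 * here is illustrative only.
 *
 *	struct dma_attrs attrs;
 *	dma_addr_t handle;
 *
 *	init_dma_attrs(&attrs);
 *	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 *	handle = dma_map_single_attrs(mydev, buf, len, DMA_TO_DEVICE, &attrs);
 *	if (dma_mapping_error(mydev, handle))
 *		return -ENOMEM;
 *	(start DMA, wait for completion)
 *	dma_unmap_single_attrs(mydev, handle, len, DMA_TO_DEVICE, &attrs);
 */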
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
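
/*
 * Example sketch: mapping a scatterlist. An IOMMU may merge entries, so
 * hardware descriptors must be built from the returned count via
 * for_each_sg(), not from the original nents; the unmap, however, takes
 * the original nents. "mydev", "sgl", "nents" and setup_desc() are
 * hypothetical.
 *
 *	struct scatterlist *s;
 *	int i, ents;
 *
 *	ents = dma_map_sg_attrs(mydev, sgl, nents, DMA_FROM_DEVICE, NULL);
 *	if (!ents)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, ents, i)
 *		setup_desc(sg_dma_address(s), sg_dma_len(s));
 *	(run the transfer)
 *	dma_unmap_sg_attrs(mydev, sgl, nents, DMA_FROM_DEVICE, NULL);
 */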
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
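
/*
 * Example sketch: mapping a sub-range of a page directly, as a driver
 * might for a fragment it never needs a kernel virtual address for.
 * "mydev", "pg", "frag_off" and "frag_len" are illustrative.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(mydev, pg, frag_off, frag_len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(mydev, handle))
 *		return -ENOMEM;
 *	(run the transfer)
 *	dma_unmap_page(mydev, handle, frag_len, DMA_TO_DEVICE);
 */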
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
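
/*
 * Example sketch: a CPU peek at a long-lived DMA_FROM_DEVICE mapping.
 * Ownership must be handed back with the _for_device variant before the
 * device writes the buffer again. "mydev", "handle", "buf", "len" and
 * process_rx() are illustrative.
 *
 *	dma_sync_single_for_cpu(mydev, handle, len, DMA_FROM_DEVICE);
 *	process_rx(buf, len);	(CPU may safely read the data here)
 *	dma_sync_single_for_device(mydev, handle, len, DMA_FROM_DEVICE);
 */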
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
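
/*
 * Example sketch: when the device reports it wrote only "pkt_len" bytes
 * at offset "pkt_off" into a larger mapping, just that range needs to
 * change ownership. All names here are illustrative.
 *
 *	dma_sync_single_range_for_cpu(mydev, handle, pkt_off, pkt_len,
 *				      DMA_FROM_DEVICE);
 *	(CPU parses the packet here)
 *	dma_sync_single_range_for_device(mydev, handle, pkt_off, pkt_len,
 *					 DMA_FROM_DEVICE);
 */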
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
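
/*
 * Example sketch: re-syncing a mapped scatterlist. Per DMA-API.txt the
 * nelems argument must be the count originally passed to dma_map_sg(),
 * not the (possibly smaller) value it returned. "mydev", "sgl" and
 * "nents" are illustrative.
 *
 *	dma_sync_sg_for_cpu(mydev, sgl, nents, DMA_BIDIRECTIONAL);
 *	(CPU touches the buffers here)
 *	dma_sync_sg_for_device(mydev, sgl, nents, DMA_BIDIRECTIONAL);
 */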
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
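
/*
 * Example sketch: exporting a coherent buffer from a driver's .mmap
 * file operation. "mydev", "cpu_addr", "handle" and "size" would come
 * from an earlier dma_alloc_coherent(); all names are illustrative.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (vma->vm_end - vma->vm_start > size)
 *			return -EINVAL;
 *		return dma_mmap_coherent(mydev, vma, cpu_addr, handle, size);
 *	}
 */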
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
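
/*
 * Example sketch: describing a coherent allocation as an sg_table, as a
 * dma-buf exporter might. Error handling is trimmed and the names are
 * illustrative.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(mydev, &sgt, cpu_addr, handle, size);
 *	if (ret)
 *		return ret;
 *	(hand sgt.sgl / sgt.nents to the importer)
 *	sg_free_table(&sgt);
 */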
#endif