/*
 * include/asm-sh/dma-mapping.h  (linux-2.6/mini2440.git)
 */
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);

#define dma_supported(dev, mask)	(1)
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
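/*
 * Illustrative sketch only, not part of this header: a driver would
 * typically negotiate its DMA mask once at probe time.  DMA_32BIT_MASK
 * comes from <linux/dma-mapping.h>; "my_probe" is a hypothetical name.
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		if (dma_set_mask(dev, DMA_32BIT_MASK))
 *			return -EIO;	// device cannot address this range
 *		...
 *	}
 */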
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	/* Give the machine vector first shot at satisfying the request */
	if (sh_mv.mv_consistent_alloc) {
		void *ret;

		ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
		if (ret != NULL)
			return ret;
	}

	return consistent_alloc(flag, size, dma_handle);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	if (sh_mv.mv_consistent_free) {
		int ret;

		ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		if (ret == 0)
			return;
	}

	consistent_free(vaddr, size);
}
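/*
 * Minimal usage sketch (assumptions: a hypothetical driver with a
 * PAGE_SIZE descriptor ring; "ring" and "ring_dma" are invented names):
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... hand ring_dma to the device, use "ring" from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */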
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	/* PCI DMA is coherent on this configuration: no cache maintenance */
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}
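/*
 * Streaming-mapping sketch (illustrative; "buf" and "len" are hypothetical):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	// ... point the device at "bus", wait for completion ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */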
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}
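/*
 * Scatter-gather sketch (illustrative; "sgl" and "count" are hypothetical,
 * and the entries are assumed to be already initialized):
 *
 *	int mapped = dma_map_sg(dev, sgl, count, DMA_FROM_DEVICE);
 *	// program one descriptor per entry from sgl[i].dma_address/length
 *	dma_unmap_sg(dev, sgl, count, DMA_FROM_DEVICE);
 */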
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}
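/*
 * Ownership hand-off sketch (illustrative; "bus" and "len" are hypothetical):
 * to inspect a DMA_FROM_DEVICE buffer on the CPU and then hand it back to
 * the device, pair the _for_cpu and _for_device variants defined below:
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	// ... CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 */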
static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}
static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}
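/*
 * Illustrative use (hypothetical "len"; ALIGN() is from <linux/kernel.h>):
 * round a buffer length up to a cache-line multiple before streaming DMA:
 *
 *	size_t aligned = ALIGN(len, dma_get_cache_alignment());
 */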
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	/* Bus address 0 is used as the error sentinel on this port */
	return dma_addr == 0;
}
#endif /* __ASM_SH_DMA_MAPPING_H */