[linux-2.6/verdex.git] include/asm-sh/dma-mapping.h (blob 627315ecdb520de2c0f8688a18934fe05c93121f)
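/*
 * Annotation (not part of the original file): this is the SuperH
 * implementation of the generic DMA mapping API from before the
 * dma_map_ops era, so each operation is open-coded below as an
 * inline function or macro rather than dispatched through an ops
 * table.
 */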
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>

extern struct bus_type pci_bus_type;

#define dma_supported(dev, mask)	(1)
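/*
 * Annotation: dma_supported() is hard-wired to 1, i.e. any mask a
 * driver requests is claimed to be usable.  dma_set_mask() below
 * therefore reduces to storing the mask, and can only fail when the
 * bus code never attached a dma_mask to the device.
 */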
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
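/*
 * Usage sketch (annotation; the platform device below is made up,
 * and DMA_32BIT_MASK is the spelling current in kernels of this
 * vintage):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;
 */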
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
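/*
 * Annotation: dma_is_consistent() is hard-wired to 1, so the
 * noncoherent allocation macros can simply alias their coherent
 * counterparts; there is no cheaper noncoherent path to fall back
 * to on this port.
 */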
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
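/*
 * Annotation: dma_map_single() above allocates nothing; the DMA
 * address is just virt_to_phys() of the buffer, preceded by a cache
 * flush on the non-coherent paths.  There is consequently no state
 * to tear down, and dma_unmap_single() can be an empty statement.
 */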
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}

	return nents;
}
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
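/*
 * Usage sketch (annotation; the buffers and lengths are
 * hypothetical):
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	if (dma_map_sg(dev, sg, 2, DMA_TO_DEVICE) == 0)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 *
 * This implementation never actually fails, and sg_dma_address() of
 * each entry is simply its physical address.
 */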
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}
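/*
 * Annotation: using page_address() here assumes every page has a
 * permanent kernel mapping, i.e. that the SH configurations served
 * by this header run without highmem.
 */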
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}
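/*
 * Annotation: on the non-coherent paths, dma_cache_sync() is
 * expected to write back and/or invalidate the CPU cache lines
 * covering the buffer according to 'dir' (writeback for
 * DMA_TO_DEVICE, invalidate for DMA_FROM_DEVICE, both for
 * DMA_BIDIRECTIONAL); the actual primitives live in the SH cache
 * handling code.
 */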
static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}
}
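/*
 * Annotation: the *_for_cpu and *_for_device wrappers below all
 * collapse onto the single-shot sync helpers above.  The cache
 * maintenance done there covers both directions of ownership
 * transfer, so the two halves of the API do not need to differ on
 * this port.
 */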
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}
static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}
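/*
 * Usage sketch (annotation; device, buffer and length are
 * hypothetical) for a streaming receive buffer:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	...			(device DMAs into buf)
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	...			(CPU reads buf)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	...			(device DMAs again)
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */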
static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
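/*
 * Annotation: a zero DMA address doubles as the error cookie here,
 * which implicitly assumes no valid mapping on this port can start
 * at physical address zero.
 */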
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
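/*
 * Usage sketch (annotation; addresses and size are hypothetical)
 * for a device with its own block of DMA-capable memory, served by
 * the asm-generic/dma-coherent helpers included above:
 *
 *	if (!dma_declare_coherent_memory(dev, bus_addr, device_addr,
 *					 0x10000, DMA_MEMORY_MAP))
 *		return -ENOMEM;
 *	...
 *	dma_release_declared_memory(dev);
 */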
#endif /* __ASM_SH_DMA_MAPPING_H */