[nao-ulib.git] / kernel/2.6.29.6-aldebaran-rt/arch/ia64/include/asm/dma-mapping.h
(blob 36c0009dbece5d29d80ac8253edb64e6cd98867b, from commit "added 2.6.29.6 aldebaran kernel")
#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#define ARCH_HAS_DMA_GET_REQUIRED_MASK

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
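/*
 * Usage sketch (illustrative, not part of the original header): how a
 * driver might obtain and release a coherent buffer through the helpers
 * above. "pdev" and the PAGE_SIZE length are hypothetical.
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus_addr,
 *				      GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... program the device with bus_addr, access the buffer via cpu_addr ...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu_addr, bus_addr);
 */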
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
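/*
 * Usage sketch (illustrative, not part of the original header): a
 * streaming mapping of a kmalloc()'ed buffer for a device-to-memory
 * transfer. "dev", "buf" and "len" are hypothetical; the returned
 * handle must be checked with dma_mapping_error() (defined below).
 *
 *	dma_addr_t daddr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, daddr))
 *		return -EIO;
 *	... let the device DMA into the buffer ...
 *	dma_unmap_single(dev, daddr, len, DMA_FROM_DEVICE);
 */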
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
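/*
 * Usage sketch (illustrative, not part of the original header):
 * mapping a scatterlist. "sgl" and "nents" are hypothetical. The
 * mapping may coalesce entries, so the returned count (0 on failure)
 * is what the driver iterates over, while dma_unmap_sg() takes the
 * original nents.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(sgl, sg, count, i)
 *		... feed sg_dma_address(sg) and sg_dma_len(sg) to the device ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */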
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sgl,
					  int nents,
					  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}
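/*
 * Usage sketch (illustrative, not part of the original header): if the
 * CPU must inspect a streaming buffer while its mapping stays alive,
 * ownership is bounced back and forth with the sync helpers above.
 * "daddr" and "len" are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, daddr, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer safely ...
 *	dma_sync_single_for_device(dev, daddr, len, DMA_FROM_DEVICE);
 */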
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}
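/*
 * Usage sketch (illustrative, not part of the original header): mapping
 * a single page, e.g. a page-cache page, for a memory-to-device
 * transfer. "page", "off" and "len" are hypothetical.
 *
 *	dma_addr_t daddr = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, daddr))
 *		return -EIO;
 *	...
 *	dma_unmap_page(dev, daddr, len, DMA_TO_DEVICE);
 */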
/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)
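/*
 * Note (not in the original header): as defined above, the range
 * variants simply forward to the full-mapping sync helpers and ignore
 * the "offset" argument entirely.
 */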
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
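/*
 * Usage sketch (illustrative, not part of the original header):
 * probe-time mask negotiation, falling back from 64-bit to 32-bit
 * addressing. DMA_64BIT_MASK and DMA_32BIT_MASK come from
 * <linux/dma-mapping.h>.
 *
 *	if (dma_set_mask(dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;
 */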
extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */