#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
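
/*
 * DMA mapping interface for this SuperH port.  Coherent buffers come from
 * consistent_alloc()/consistent_free(); streaming mappings perform explicit
 * cache maintenance unless the PCI bus is DMA-coherent.
 */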

struct pci_dev;

extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);
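
/*
 * Any DMA mask is reported as supported: dma_supported() always evaluates
 * to 1, so dma_set_mask() only has to check that dev->dma_mask exists
 * before updating it.
 */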
#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
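
/*
 * Write back and invalidate ("purge") every L1 data-cache line overlapping
 * [vaddr, vaddr + size).  "ocbp" is the SuperH operand cache block purge
 * instruction, issued once per L1_CACHE_BYTES-sized line.
 */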
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
	unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}
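
/*
 * Streaming mappings: when the PCI bus is DMA-coherent there is nothing to
 * flush and the mapping is simply the physical address of the buffer;
 * otherwise the buffer is purged from the cache first.
 */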
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
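
/*
 * Typical streaming usage from a driver, as an illustrative sketch only
 * ("mydev", "buf" and "len" are hypothetical):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	... program the device with "handle", wait for completion ...
 *	dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);
 */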

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}
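
/*
 * The dma_sync_* helpers re-purge the affected cache lines around CPU or
 * device accesses to a streaming mapping.  On a DMA-coherent PCI bus they
 * are no-ops.
 */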
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */