#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);
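
/*
 * The Dreamcast can't use the generic consistent_alloc() for PCI
 * devices; its board support code provides these replacements instead
 * (see the comment in dma_alloc_coherent() below).
 */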
#ifdef CONFIG_SH_DREAMCAST
struct pci_dev;
extern struct bus_type pci_bus_type;
extern void *__pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
				    dma_addr_t *dma_handle);
extern void __pci_free_consistent(struct pci_dev *hwdev, size_t size,
				  void *vaddr, dma_addr_t dma_handle);
#endif

#define dma_supported(dev, mask)	(1)
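
/*
 * dma_supported() always succeeds above, so the only way to fail here
 * is a device with no dma_mask set.
 */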
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
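
/*
 * Allocate a consistent (uncached) buffer, returning the kernel virtual
 * address and storing the corresponding bus address in *dma_handle.
 */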
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, int flag)
{
	/*
	 * Some platforms have special pci_alloc_consistent() implementations,
	 * in these instances we can't use the generic consistent_alloc().
	 */
#ifdef CONFIG_SH_DREAMCAST
	if (dev && dev->bus == &pci_bus_type)
		return __pci_alloc_consistent(NULL, size, dma_handle);
#endif
	if (sh_mv.mv_consistent_alloc)
		return sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);

	return consistent_alloc(flag, size, dma_handle);
}
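
/*
 * Release a buffer from dma_alloc_coherent(), taking the same
 * Dreamcast/machine-vector detours as the allocation did.
 */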
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	/*
	 * Same note as above applies to pci_free_consistent().
	 */
#ifdef CONFIG_SH_DREAMCAST
	if (dev && dev->bus == &pci_bus_type) {
		__pci_free_consistent(NULL, size, vaddr, dma_handle);
		return;
	}
#endif
	if (sh_mv.mv_consistent_free) {
		sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		return;
	}

	consistent_free(vaddr, size);
}
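
/*
 * Make the CPU cache and memory consistent for a DMA buffer; the
 * direction determines whether lines are written back, invalidated,
 * or both.
 */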
static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}
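
/*
 * There is no IOMMU here: mapping a buffer is just virt_to_bus(), plus
 * a cache sync on non-coherent configurations.
 */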
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
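
/*
 * Map each scatterlist segment, syncing the cache where required and
 * recording the bus address in sg[i].dma_address.
 */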
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
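
/*
 * Page mappings reduce to dma_map_single() on the page's kernel
 * virtual address.
 */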
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}
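
/*
 * Re-sync a mapped region; a no-op for cache-coherent PCI, otherwise a
 * cache sync on the region's virtual address.
 */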
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}
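
/*
 * Re-sync every segment of a scatterlist, recomputing each bus address
 * along the way just as dma_map_sg() does.
 */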
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}
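
/*
 * No for_cpu/for_device distinction is needed here; both directions of
 * ownership transfer come down to the same cache sync, so the variants
 * below simply call the common helpers.
 */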
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */