#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/config.h>
#include <linux/mm.h>		/* page_address() */
#include <asm/scatterlist.h>
#include <asm/io.h>		/* virt_to_bus(), bus_to_virt(), dma_cache_wback_inv() */

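/*
 * DMA mapping support for SuperH.  Coherent ("consistent") memory comes
 * from the arch's consistent_alloc()/consistent_free() helpers declared
 * below; streaming mappings rely on explicit cache writeback/invalidate
 * wherever the bus is not DMA-coherent.
 */
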
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

#define dma_supported(dev, mask)	(1)

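/*
 * Every DMA mask is accepted here (dma_supported() is hard-wired to 1),
 * so dma_set_mask() only has to check that the device has a mask to
 * store into.
 */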
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

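/*
 * Coherent allocations are delegated to consistent_alloc()/consistent_free();
 * note that the struct device and gfp flags are ignored by this
 * implementation.  A minimal, illustrative-only usage sketch ("mydev" is a
 * hypothetical device pointer):
 *
 *	dma_addr_t bus;
 *	void *vaddr = dma_alloc_coherent(mydev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	...
 *	dma_free_coherent(mydev, PAGE_SIZE, vaddr, bus);
 */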
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}

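/*
 * dma_cache_sync() makes a buffer visible to the device by writing the
 * CPU cache back and invalidating it; the direction argument is not
 * used, a full writeback + invalidate is always performed.
 */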
static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	dma_cache_wback_inv((unsigned long)vaddr, size);
}

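/*
 * Streaming mapping of a single buffer: on a cache-coherent PCI bus the
 * buffer is handed over as-is, otherwise the cache is flushed first.
 * Either way the bus address is simply virt_to_bus() of the pointer.
 */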
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}

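/* Unmapping needs no teardown work here, so it compiles away. */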
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

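/*
 * Map a scatterlist: each entry gets the physical address of its page
 * plus offset as its bus address, with a cache flush per entry on
 * non-coherent configurations.
 */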
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

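/*
 * Page-based mappings are thin wrappers around the single-buffer
 * variants (page_address() assumes the page is in lowmem).
 */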
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

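/*
 * Sync operations mirror the map operations: they are no-ops on a
 * coherent PCI bus and cache writeback/invalidate everywhere else.
 */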
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}

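/* As above, but only for the sub-range starting at "offset". */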
static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

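/*
 * The for_cpu/for_device variants perform identical cache maintenance
 * in this implementation, so they simply alias the plain sync helpers.
 */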
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
	__attribute__ ((alias("dma_sync_single")));

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
	__attribute__ ((alias("dma_sync_single")));

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
	__attribute__ ((alias("dma_sync_sg")));

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
	__attribute__ ((alias("dma_sync_sg")));

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

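/*
 * Illustrative streaming-DMA sequence (not part of this header's API;
 * "mydev", "buf" and "len" are hypothetical names), assuming the device
 * writes into the buffer:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(mydev, buf, len, DMA_FROM_DEVICE);
 *	...point the device at "handle" and wait for completion...
 *	dma_sync_single_for_cpu(mydev, handle, len, DMA_FROM_DEVICE);
 *	...the CPU may now read buf...
 *	dma_unmap_single(mydev, handle, len, DMA_FROM_DEVICE);
 */
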
#endif /* __ASM_SH_DMA_MAPPING_H */