#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

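/*
 * Backing allocator for coherent DMA memory; see arch/sh/mm/consistent.c.
 * The hwdev argument is unused on sh; the callers below pass NULL.
 */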
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
                              dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
                            void *vaddr, dma_addr_t dma_handle);

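/* Every DMA mask is acceptable on sh, so this is simply a constant. */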
#define dma_supported(dev, mask)        (1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

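/*
 * Coherent allocations ignore the device and the gfp flags and simply
 * delegate to consistent_alloc()/consistent_free().
 */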
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle)
{
        consistent_free(NULL, size, vaddr, dma_handle);
}

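/*
 * Write back and invalidate the CPU cache over [vaddr, vaddr + size)
 * so that a subsequent DMA transfer sees, and produces, consistent data.
 */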
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
{
        dma_cache_wback_inv((unsigned long)vaddr, size);
}

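/*
 * When PCI DMA is coherent, mapping is a pure address translation;
 * otherwise the buffer must first be flushed from the CPU cache.
 *
 * Typical driver usage (sketch only; "dev", "buf" and "len" are
 * hypothetical caller-side names):
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(handle))
 *              goto fail;
 */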
static inline dma_addr_t dma_map_single(struct device *dev,
                                        void *ptr, size_t size,
                                        enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return virt_to_bus(ptr);
#endif
        dma_cache_sync(dev, ptr, size, dir);

        return virt_to_bus(ptr);
}

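/* Mappings carry no state on sh, so unmapping is a no-op. */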
#define dma_unmap_single(dev, addr, size, dir)  do { } while (0)

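/*
 * Map a scatterlist entry by entry: flush each segment when the bus
 * is non-coherent, then record its physical (bus) address.
 */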
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
                               sg[i].length, dir);
#endif
                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)       do { } while (0)

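/* Page mappings reuse dma_map_single() on the page's kernel address. */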
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, dma_address, size, dir);
}

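/*
 * Synchronize a single mapping: nothing to do on a coherent PCI bus,
 * otherwise write back/invalidate the CPU cache over the buffer.
 */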
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
}

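/*
 * Synchronize a scatterlist segment by segment, recomputing each bus
 * address exactly as dma_map_sg() does.
 */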
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
                               sg[i].length, dir);
#endif
                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }
}

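/*
 * The _for_cpu/_for_device variants are identical here: the same cache
 * maintenance covers both transfer directions on sh.
 */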
static inline void dma_sync_single_for_cpu(struct device *dev,
                                           dma_addr_t dma_handle, size_t size,
                                           enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t dma_handle, size_t size,
                                              enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                                       struct scatterlist *sg, int nelems,
                                       enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                                          struct scatterlist *sg, int nelems,
                                          enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
        /*
         * Each processor family will define its own L1_CACHE_SHIFT,
         * L1_CACHE_BYTES wraps to this, so this is always safe.
         */
        return L1_CACHE_BYTES;
}

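/* A bus address of zero is used as the error marker. */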
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */