/* DMA mapping. Nothing tricky here, just virt_to_phys */
#ifndef _ASM_CRIS_DMA_MAPPING_H
#define _ASM_CRIS_DMA_MAPPING_H
#include <linux/mm.h>
#include <linux/kernel.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_PCI
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
#else
/* Without PCI there is no way to hand out coherent DMA memory here */
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG();
	return NULL;
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG();
}
#endif
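/*
 * Illustrative only (not part of the original header): with CONFIG_PCI, a
 * driver would obtain and release a coherent buffer roughly like this,
 * where `dev`, `BUF_SIZE` and the error handling are hypothetical:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, BUF_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, BUF_SIZE, buf, handle);
 */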
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return virt_to_phys(ptr);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
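/*
 * Illustrative only: on this platform a streaming mapping is just a
 * virt-to-phys translation, so a hypothetical driver with `dev`, `buf`
 * and `len` would do:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... hand `handle` to the device and wait for it to finish ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */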
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	printk("Map sg\n");
	return nents;
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_phys(page) + offset;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
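/*
 * Illustrative only: mapping one whole page the driver already holds
 * (`page` is hypothetical). Because dma_map_page() is page_to_phys() +
 * offset here, the returned handle is the page's physical address:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					 DMA_FROM_DEVICE);
 */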
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
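/*
 * Illustrative only: the scatter-gather variants map a whole scatterlist
 * in one call. With a hypothetical initialised scatterlist `sg` of
 * `nents` entries:
 *
 *	int count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	... program one descriptor per mapped entry ...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 *
 * Note that dma_map_sg() above simply returns `nents` unchanged.
 */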
/*
 * DMA memory is consistent on this platform (see dma_is_consistent()
 * below), so the sync operations have nothing to do.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
}
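/*
 * Illustrative only: portable driver code still brackets CPU accesses to
 * a streaming buffer with the sync calls, even though they are no-ops
 * here, e.g. for a hypothetical `handle`/`len` the device writes into:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */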
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	/* Mappings never fail on this platform */
	return 0;
}
static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}
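/*
 * Illustrative only: 0x00ffffff is a 24-bit mask (16 MB), the smallest
 * range the check above assumes GFP_DMA can satisfy. So
 * dma_supported(dev, 0x00ffffff) returns 1, while a tighter mask such
 * as 0x0000ffff (64 KB) returns 0.
 */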
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
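/*
 * Illustrative only: a hypothetical probe routine would set the mask
 * before creating any mappings, checking the -EIO failure case:
 *
 *	if (dma_set_mask(dev, 0xffffffff))
 *		return -EIO;
 */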
static inline int
dma_get_cache_alignment(void)
{
	return (1 << INTERNODE_CACHE_SHIFT);
}
#define dma_is_consistent(d, h) (1)
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
}

#endif /* _ASM_CRIS_DMA_MAPPING_H */