#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

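/*
 * Example usage (illustrative sketch; "ring" and "ring_dma" are
 * hypothetical driver variables): a consistent buffer for a
 * descriptor ring, usable by CPU and device at the same time.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, access "ring" from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */
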
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();
	WARN_ON(size == 0);
	flush_write_buffers();
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();
}

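/*
 * Example usage (illustrative sketch; "buf" and "len" are hypothetical
 * driver variables): a streaming mapping for one receive operation.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... point the hardware at "handle", wait for the DMA to finish ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */
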
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
		BUG();
	WARN_ON(nents == 0 || sg[0].length == 0);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);

		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

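/*
 * Example usage (illustrative sketch; "page", "off" and "len" are
 * hypothetical): mapping part of a page, e.g. for zero-copy I/O.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
 *	... run the transfer ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */
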
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

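/*
 * Example usage (illustrative sketch; setup_descriptor() is a
 * hypothetical driver helper): map a scatterlist and program one
 * hardware descriptor per mapped entry.  This implementation always
 * returns nents unchanged, but portable code must use the return
 * value, which may be smaller than nents.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++)
 *		setup_descriptor(sg[i].dma_address, sg[i].length);
 *	... run the transfer ...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */
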
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}

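/*
 * Example usage (illustrative sketch; "handle", "buf" and "len" are
 * hypothetical): touching a long-lived streaming mapping from the CPU
 * between transfers.  Ownership passes to the CPU with *_for_cpu and
 * back to the device with *_for_device.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU examines the received data in "buf" ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
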
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

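/*
 * Example usage (illustrative sketch): on i386 a mapping can never
 * fail, but portable callers still check the handle they got back.
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 */
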
static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

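/*
 * Example usage (illustrative sketch): a probe routine limiting the
 * device to 30-bit addressing before any mapping is made.
 *
 *	if (dma_set_mask(dev, 0x3fffffffULL))
 *		return -EIO;
 */
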
static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

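/*
 * Example usage (illustrative sketch; "len" is hypothetical): rounding
 * a buffer size up so it does not share a cache line with other data.
 *
 *	size_t alloc_size = ALIGN(len, dma_get_cache_alignment());
 */
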
#define dma_is_consistent(d)	(1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

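/*
 * Example usage (illustrative sketch; "vaddr" came from
 * dma_alloc_noncoherent(), "data" and "len" are hypothetical): flush
 * CPU writes before telling the device to read the buffer.  On i386
 * the allocation is coherent anyway, so only write buffers need
 * flushing.
 *
 *	memcpy(vaddr, data, len);
 *	dma_cache_sync(vaddr, len, DMA_TO_DEVICE);
 */
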
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

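/*
 * Example usage (illustrative sketch; "sram_bus" and "SRAM_SIZE" are
 * hypothetical): route a device-local memory window through
 * dma_alloc_coherent().  dma_declare_coherent_memory() returns 0 on
 * failure.
 *
 *	if (!dma_declare_coherent_memory(dev, sram_bus, sram_bus,
 *					 SRAM_SIZE, DMA_MEMORY_MAP))
 *		return -ENOMEM;
 *	...
 *	dma_release_declared_memory(dev);
 */
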
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif