#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);
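/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical driver allocating a descriptor ring with
 * dma_alloc_coherent().  "pdev" and the PAGE_SIZE ring size are
 * assumptions for illustration.
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
 *                                GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ... hand ring_dma to the device, access "ring" from the CPU ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */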
/* i386 has no IOMMU: a streaming mapping is just the physical address. */
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(size == 0);
        flush_write_buffers();
        return virt_to_phys(ptr);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        /* Nothing to tear down; the mapping was only an address translation. */
        BUG_ON(!valid_dma_direction(direction));
}
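/*
 * Example (illustrative sketch, not part of the original header): a
 * streaming mapping around a single transfer.  "dev", "buf" and "len"
 * are hypothetical driver state.
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... start the device DMA from "handle" and wait for completion ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * On i386 the unmap is a no-op, but portable drivers must still pair
 * every map with an unmap.
 */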
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sg[0].length == 0);

        for (i = 0; i < nents; i++) {
                BUG_ON(!sg[i].page);

                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        flush_write_buffers();
        return nents;
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return page_to_phys(page) + offset;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}
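/*
 * Example (illustrative sketch, not part of the original header):
 * mapping a scatterlist for a multi-segment transfer.  "dev", "sglist"
 * and "nents" are hypothetical driver state.
 *
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *      for (i = 0; i < count; i++)
 *              ... feed sglist[i].dma_address and sg_dma_len(&sglist[i])
 *                  to the device ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original "nents", not the count
 * returned by dma_map_sg().
 */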
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        /* DMA on i386 is cache-coherent: the CPU view needs no work. */
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        flush_write_buffers();
}
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        flush_write_buffers();
}
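/*
 * Example (illustrative sketch, not part of the original header):
 * reusing one streaming mapping across several transfers with the sync
 * calls.  "dev", "handle", "buf" and "len" are hypothetical driver
 * state.
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the CPU may now inspect buf ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the device may DMA into the buffer again ...
 *
 * On i386 only the for_device direction needs a write-buffer flush,
 * which is why the for_cpu variants above have empty bodies.
 */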
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        /* Mappings cannot fail on i386; there is no IOMMU to exhaust. */
        return 0;
}
static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA (the low 16MB on i386) when the mask
         * isn't all 1s, so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
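/*
 * Example (illustrative sketch, not part of the original header): a
 * driver negotiating its DMA mask at probe time.  "dev" is
 * hypothetical.
 *
 *      if (dma_set_mask(dev, 0xffffffffULL))
 *              return -EIO;
 *
 * A 32-bit mask always succeeds here; anything below 0x00ffffff is
 * rejected by dma_supported() above, since such allocations cannot be
 * satisfied from the GFP_DMA zone.
 */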
static inline int
dma_get_cache_alignment(void)
{
        /* There is no easy way to get the cache size on all x86
         * variants, so return the maximum possible to be safe. */
        return (1 << INTERNODE_CACHE_SHIFT);
}
#define dma_is_consistent(d)	(1)
static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);
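/*
 * Example (illustrative sketch, not part of the original header):
 * handing the DMA API a device-local memory window to carve coherent
 * allocations from.  The bus address, device address and size are
 * made-up values.
 *
 *      if (!dma_declare_coherent_memory(dev, 0xfe000000, 0x0, 0x10000,
 *                                       DMA_MEMORY_MAP))
 *              return -ENOMEM;
 *      ... dma_alloc_coherent() on this device now uses that window ...
 *      dma_release_declared_memory(dev);
 */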
extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);

#endif