#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, unsigned long ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported_op)(struct device *hwdev, u64 mask);
};
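/*
 * Illustrative sketch (not part of the original header): a platform back end
 * such as an IOMMU layer would provide one instance of this ops table and
 * point the global dma_ops at it.  The my_iommu_* names below are hypothetical
 * placeholders, shown only to make the intended wiring concrete:
 *
 *	static struct dma_mapping_ops my_iommu_dma_ops = {
 *		.alloc_coherent		= my_iommu_alloc_coherent,
 *		.free_coherent		= my_iommu_free_coherent,
 *		.map_single		= my_iommu_map_single,
 *		.unmap_single		= my_iommu_unmap_single,
 *		.map_sg			= my_iommu_map_sg,
 *		.unmap_sg		= my_iommu_unmap_sg,
 *		.mapping_error		= my_iommu_mapping_error,
 *		.dma_supported_op	= my_iommu_dma_supported,
 *	};
 *
 *	void __init my_iommu_init(void)
 *	{
 *		dma_ops = &my_iommu_dma_ops;
 *	}
 */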
extern struct dma_mapping_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
#define dma_alloc_coherent(dev, size, handle, gfp)	\
	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
/* coherent mem. is cheap */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}
#define dma_free_coherent	platform_dma_free_coherent
static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
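/*
 * Usage sketch (illustrative, not part of the original header): a driver
 * allocating a long-lived, coherent descriptor ring.  "my_dev" and "ring"
 * are hypothetical names.
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(my_dev, PAGE_SIZE, &ring_bus, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(my_dev, PAGE_SIZE, ring, ring_bus);
 */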
#define dma_map_single_attrs	platform_dma_map_single_attrs
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					 size_t size, int dir)
{
	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
}
#define dma_map_sg_attrs	platform_dma_map_sg_attrs
static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, int dir)
{
	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
}
#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
				    size_t size, int dir)
{
	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
}
#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				int nents, int dir)
{
	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
}
#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu		platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device	platform_dma_sync_single_for_device
#define dma_sync_sg_for_device		platform_dma_sync_sg_for_device
#define dma_mapping_error		platform_dma_mapping_error
#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)
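/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * streaming mapping for a single outgoing buffer.  "my_dev", "buf" and "len"
 * are hypothetical.
 *
 *	dma_addr_t bus = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, bus))
 *		return -EIO;
 *	... hand "bus" to the device and wait for the transfer to complete ...
 *	dma_unmap_single(my_dev, bus, len, DMA_TO_DEVICE);
 */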
/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)
#define dma_supported		platform_dma_supported
static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
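/*
 * Usage sketch (illustrative, not part of the original header): a driver's
 * probe routine declaring that its device can address 32 bits of DMA.
 * "my_dev" is hypothetical.
 *
 *	if (dma_set_mask(my_dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */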
extern int dma_get_cache_alignment(void);
static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}
#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	return dma_ops;
}
#endif /* _ASM_IA64_DMA_MAPPING_H */