#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
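
/*
 * Illustrative sketch (not part of the original header): attributes are
 * passed as a bitmask in the 'attrs' argument of the *_attrs variants of
 * the mapping calls, e.g.:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *				      DMA_ATTR_SKIP_CPU_SYNC |
 *				      DMA_ATTR_NO_WARN);
 */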
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
};
extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
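
/*
 * Worked example (illustrative only): DMA_BIT_MASK(32) evaluates to
 * 0x00000000ffffffffULL, the highest address a device limited to 32 address
 * bits can generate; DMA_BIT_MASK(64) is special-cased to ~0ULL because
 * 1ULL << 64 would be undefined behaviour.
 */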
static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
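
/*
 * Illustrative sketch (not part of the original header): the value returned
 * above is the number of DMA segments actually used, which may be smaller
 * than 'nents' if an IOMMU merged entries; 0 means the mapping failed:
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);	(pass the original
 *							 nents, not count)
 */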
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
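
/*
 * Illustrative sketch (not part of the original header): a driver that needs
 * to look at a streaming buffer while it is still mapped brackets the CPU
 * access with the sync calls above, e.g. for a device-to-memory transfer:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	(CPU examines the received data here)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */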
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
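
/*
 * Illustrative sketch (not part of the original header, names are
 * hypothetical): a character-device driver exposing a coherent buffer to
 * user space would typically call dma_mmap_coherent() from its .mmap
 * handler:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */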
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
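
/*
 * Illustrative sketch (not part of the original header): typical lifetime of
 * a coherent allocation in a driver:
 *
 *	dma_addr_t handle;
 *	void *cpu;
 *
 *	cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	(program 'handle' into the device, access the buffer through 'cpu')
 *	dma_free_coherent(dev, size, cpu, handle);
 */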
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}
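
/*
 * Illustrative sketch (not part of the original header): a streaming mapping
 * must be checked with dma_mapping_error() before the address is handed to
 * the device, since failure cannot be encoded portably in a dma_addr_t:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */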
static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
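
/*
 * Illustrative sketch (not part of the original header): a probe routine
 * typically requests the widest mask the hardware can drive and falls back
 * to 32 bits:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */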
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif
/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
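
/*
 * Illustrative sketch (not part of the original header): the
 * DEFINE_DMA_UNMAP_* helpers below let a driver store unmap bookkeeping only
 * in configurations that actually need it, e.g. in a hypothetical descriptor:
 *
 *	struct foo_tx_desc {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, mapping, handle);
 *	dma_unmap_len_set(desc, len, buf_len);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, mapping),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */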
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif

#endif /* _LINUX_DMA_MAPPING_H */