/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}
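/*
 * Illustrative sketch (not part of the original file): drivers normally
 * declare their coherent DMA capability before allocating, which is what
 * get_coherent_dma_mask() above validates.  The probe function and device
 * below are hypothetical.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	/* Claim 32-bit coherent DMA addressing for this device. */
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}
#endif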
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
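/*
 * Worked example (added for clarity, not in the original): a 12KiB request
 * gives get_order() == 2, i.e. a 16KiB block of four pages; split_page()
 * then lets the loop in __dma_alloc_buffer() above hand the unused fourth
 * page straight back to the page allocator.
 */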
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
#ifdef CONFIG_MMU
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif
/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		return (void *)c->vm_start;
	}
	return NULL;
}
static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}
#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = page_to_dma(dev, page);

	return addr;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);
int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(dma_to_page(dev, handle), size);
}
EXPORT_SYMBOL(dma_free_coherent);
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				pte_t saved_pte;
				vaddr = kmap_high_l1_vipt(page, &saved_pte);
				op(vaddr + offset, len, dir);
				kunmap_high_l1_vipt(page, saved_pte);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);
void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);
/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);
/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);