/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END		(0xffe00000)
#define CONSISTENT_BASE		(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES	(CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
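
/*
 * Worked example (an illustration, assuming the classic ARM two-level
 * page table layout where PGDIR_SHIFT is 21 and PAGE_SHIFT is 12): with
 * CONSISTENT_DMA_SIZE set to SZ_2M, CONSISTENT_BASE works out to
 * 0xffe00000 - 0x200000 = 0xffc00000 and NUM_CONSISTENT_PTES to 1, so
 * for a consistent mapping at 0xffc01000:
 *
 *	CONSISTENT_OFFSET(0xffc01000)    = 0x1000 >> 12 = 1
 *	CONSISTENT_PTE_INDEX(0xffc01000) = 0x1000 >> 21 = 0
 */
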
/*
 * These are the page tables (2MB each) covering uncached, DMA
 * consistent allocations.
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	/*
	 * Sanity check the allocation size.
	 */
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#x mask %#llx)\n", size, mask);
		goto no_page;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		struct page *end = page + (1 << order);
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));

			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	if (arch_is_coherent()) {
		void *virt;

		virt = kmalloc(size, gfp);
		if (!virt)
			return NULL;
		*handle = virt_to_dma(dev, virt);

		return virt;
	}

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			vma->vm_flags |= VM_RESERVED;
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;
	int idx;
	u32 off;

	WARN_ON(irqs_disabled());

	if (arch_is_coherent()) {
		kfree(cpu_addr);
		return;
	}

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0, i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void dma_cache_maint(const void *start, size_t size, int direction)
{
	const void *end = start + size;

	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);
		outer_inv_range(__pa(start), __pa(end));
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		outer_clean_range(__pa(start), __pa(end));
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		outer_flush_range(__pa(start), __pa(end));
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_maint);