Linux 4.19-rc7
arch/microblaze/mm/consistent.c
/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/dma-noncoherent.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU
/* I have to use dcache values because I can't rely on RAM size */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif
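
/*
 * Worked example (hypothetical values): with cpuinfo.dcache_base ==
 * 0x80000000 and cpuinfo.dcache_high == 0x8fffffff, the mask evaluates
 * to 0x10000000, so ORing it into a cached address such as 0x80012000
 * yields 0x90012000 -- the same DDR cell seen through the uncached
 * mirror described below.
 */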

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region. So, memory accessed
 * in this mirror region will not be cached. It's alloced from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
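/*
 * Drivers do not call this directly; they reach it through the generic
 * DMA API. A minimal, illustrative caller ("pdev" is a hypothetical
 * platform device):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(&pdev->dev, 4096, &handle,
 *				       GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */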
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);
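	/*
	 * Example of the rounding above: with 4 KB pages, a 6000-byte
	 * request is aligned up to 8192, and get_order(8192) == 1, i.e.
	 * a two-page allocation.
	 */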

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * we need to ensure that there are no cachelines in use,
	 * or worse dirty in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
			virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic! Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
	    (unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = __virt_to_phys(vaddr);
#endif
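	/*
	 * Either way, *dma_handle is what the device should be given for
	 * bus-master access: on no-MMU the shadowed address doubles as
	 * the physical address (see the comment above), while the MMU
	 * path reports the real physical address of the backing pages.
	 */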

	/*
	 * free wasted pages. We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

	split_page(page, order);
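	/*
	 * split_page() above turns the single order-N allocation into
	 * 2^N independent order-0 pages, so the tail pages that fall
	 * outside "size" can be handed back one by one below.
	 */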

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache inhibit pages */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}

#ifdef CONFIG_MMU
static pte_t *consistent_virt_to_pte(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
}

unsigned long consistent_virt_to_pfn(void *vaddr)
{
	pte_t *ptep = consistent_virt_to_pte(vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;

	return pte_pfn(*ptep);
}
#endif
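
/*
 * consistent_virt_to_pfn() lets callers that need to expose one of these
 * uncached mappings to userspace (e.g. the arch's mmap implementation)
 * recover the backing page frame; a return of 0 means the walk found no
 * present PTE for the address.
 */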

/*
 * free page(s) as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		__free_reserved_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
	do {
		pte_t *ptep = consistent_virt_to_pte(vaddr);
		unsigned long pfn;

		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* flush tlb */
	flush_tlb_all();
#endif
}
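
/*
 * Usage note (illustrative): a buffer obtained via dma_alloc_coherent()
 * must be released with dma_free_coherent() using the same device, size
 * and handle, which ends up here:
 *
 *	dma_free_coherent(&pdev->dev, 4096, buf, handle);
 *
 * ("pdev", "buf" and "handle" are the hypothetical names from the
 * allocation example above.)
 */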