RT-AC66 3.0.0.4.374.130 core
[tomato.git] / release/src-rt-6.x/linux/linux-2.6/arch/mips/mm/ioremap.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
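
/*
 * Fill in the PTEs covering one PMD's worth of the target range:
 * map [address, address + size) to consecutive physical pages
 * starting at phys_addr, marking each page global, present,
 * readable and writable, plus the caller's cache flags.
 */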
static inline void remap_area_pte(pte_t * pte, unsigned long address,
        phys_t size, phys_t phys_addr, unsigned long flags)
{
        phys_t end;
        unsigned long pfn;
        pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
                                   | __WRITEABLE | flags);

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        pfn = phys_addr >> PAGE_SHIFT;
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, pfn_pte(pfn, pgprot));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));
}
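
/*
 * Walk the PMD entries covering one PGD's worth of the range,
 * allocating a PTE page for each and handing it to remap_area_pte().
 */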
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
        phys_t size, phys_t phys_addr, unsigned long flags)
{
        phys_t end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}
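
/*
 * Top level of the walk: for each PGD entry spanned by the range,
 * allocate the intermediate tables and populate them, flushing the
 * caches before the page tables change and the TLB afterwards.
 */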
static int remap_area_pages(unsigned long address, phys_t phys_addr,
        phys_t size, unsigned long flags)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        if (address >= end)
                BUG();
        do {
                pud_t *pud;
                pmd_t *pmd;

                error = -ENOMEM;
                pud = pud_alloc(&init_mm, dir, address);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
        return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
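
/*
 * Take an atomic (fixed-slot) mapping of a single pfn with the given
 * protection bits: the global kmap_prot is swapped out around the call
 * to kmap_atomic_pfn() and restored afterwards. Used below to reach
 * device pages from interrupt context.
 */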
void *kmap_atomic_pfn_prot(unsigned long pfn, enum km_type type, pgprot_t prot)
{
        pgprot_t old_kmap_prot = kmap_prot;
        void * vaddr;

        kmap_prot = prot;
        vaddr = kmap_atomic_pfn(pfn, type);
        kmap_prot = old_kmap_prot;
        return vaddr;
}

void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
        struct vm_struct * area;
        unsigned long offset;
        phys_t last_addr;
        void * addr;

        /* Platform hook: may rewrite addresses above 32 bits. */
        phys_addr = fixup_bigphys_addr(phys_addr, size);

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Map uncached objects in the low 512MB of address space using KSEG1,
         * otherwise map using page tables.
         */
        if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
            flags == _CACHE_UNCACHED)
                return (void __iomem *) CKSEG1ADDR(phys_addr);
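
        /*
         * For example, on a 32-bit kernel physical 0x18000000 comes back
         * as the unmapped, uncached KSEG1 address 0xb8000000 with no page
         * tables touched at all.
         */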

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;
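
        /*
         * Worked example (hypothetical request, 4KB pages): for
         * phys_addr = 0x1f001234 and size = 0x10, offset becomes 0x234,
         * phys_addr is rounded down to 0x1f001000, and size grows to
         * PAGE_ALIGN(0x1f001243 + 1) - 0x1f001000 = 0x1000, i.e. one
         * whole page; the caller gets the mapping base plus offset back.
         */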

        /* If we are in interrupt/bottom-half context, try to use a fixed
         * temporary map, which we can get atomically. However, we are
         * limited to a single page.
         */
        if (in_interrupt() && (size <= PAGE_SIZE))
                return (void __iomem *) (kmap_atomic_pfn_prot(phys_addr >> PAGE_SHIFT, KM_PCIE, PAGE_KERNEL_UNCACHED) + offset);

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = area->addr;
        if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
                vunmap(addr);
                return NULL;
        }

        return (void __iomem *) (offset + (char *)addr);
}

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
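
/*
 * Undo __ioremap(): KSEG1 addresses were never mapped, so there is
 * nothing to tear down; atomic mappings taken in interrupt context are
 * dropped with kunmap_atomic(); everything else is a vmalloc-space
 * mapping whose vm_struct must be found and freed.
 */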
void __iounmap(const volatile void __iomem *addr)
{
        struct vm_struct *p;

        if (IS_KSEG1(addr))
                return;

        if (in_interrupt()) {
                kunmap_atomic((void *)addr, KM_PCIE);
                return;
        }

        p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
        if (!p)
                printk(KERN_ERR "iounmap: bad address %p\n", addr);

        kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
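
/*
 * Usage sketch (illustrative only, not part of this file; the device
 * address and register offsets are hypothetical): a driver maps its
 * register window once, accesses it through the MMIO helpers, and
 * unmaps it on teardown.
 *
 *	u32 status;
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(0x18001000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);		// hypothetical control register
 *	status = readl(regs + 0x14);	// hypothetical status register
 *	iounmap(regs);
 */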