/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

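/*
 * Legacy ISA window: everything between 640k (0xa0000) and 1MB (0x100000),
 * i.e. VGA memory, option ROMs and the BIOS area, is always covered by the
 * kernel direct mapping, so __ioremap() below answers requests that fall
 * entirely inside this window with phys_to_virt() instead of building a
 * new mapping.
 */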
#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

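/*
 * remap_area_pmd() and remap_area_pud() walk one page-table level each:
 * both clip the requested range to the region covered by the table they
 * are filling (PUD_SIZE worth of PMDs, PGDIR_SIZE worth of PUDs),
 * allocate the next lower table as needed and hand the remainder down
 * until remap_area_pte() above writes the leaf entries.
 */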
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static inline int remap_area_pud(pud_t * pud, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pmd_t * pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address && (address < end));
	return 0;
}

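/*
 * remap_area_pages() drives the walk above. Note the phys_addr
 * bookkeeping: each level subtracts the current virtual address once
 * ("phys_addr -= address") and the callee adds its own address back
 * ("phys_addr + address"), so the delta between physical and virtual
 * stays constant no matter where inside a table the walk starts.
 */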
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *pgd;
	unsigned long end = address + size;

	phys_addr -= address;
	pgd = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pud = pud_alloc(&init_mm, pgd, address);
		error = -ENOMEM;
		if (!pud)
			break;
		if (remap_area_pud(pud, address, end - address,
					phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
					unsigned long flags)
{
	int err = 0;
	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here and not a struct page, because the
		 * phys addr can be in a hole between nodes and then have no
		 * memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages, __pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}

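/*
 * The pgprot flags passed to __ioremap() below (e.g. _PAGE_PCD from
 * ioremap_nocache()) therefore end up in two places: in the new
 * vmalloc-space PTEs written by remap_area_pages(), and, via
 * ioremap_change_attr(), in the kernel direct mapping of the same
 * physical range (when that range is covered by the direct mapping at
 * all), so both aliases agree on the caching attributes.
 */
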
/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_FLATMEM
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (last_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}

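/*
 * Worked example of the alignment handling above (a sketch; the values
 * are hypothetical and assume 4K pages): a request for phys_addr
 * 0xfebc1004, size 8 gives last_addr = 0xfebc100b, so offset becomes
 * 0x004, phys_addr is rounded down to 0xfebc1000, and size becomes
 * PAGE_ALIGN(0xfebc100c) - 0xfebc1000 = 0x1000. One page is mapped and
 * the caller gets the new virtual base plus 0x004 back.
 */
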
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD);
}

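/*
 * Typical driver-side use of ioremap_nocache()/iounmap() (a sketch only;
 * MMIO_PHYS_BASE, MMIO_LEN and REG_STATUS are hypothetical names, not
 * defined anywhere in this file):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(MMIO_PHYS_BASE, MMIO_LEN);
 *	if (!regs)
 *		return -ENODEV;
 *	status = readl(regs + REG_STATUS);
 *	iounmap(regs);
 *
 * Accesses must go through readb/readw/readl/writeb/writew/writel and
 * friends; the returned cookie is not guaranteed to be a plain pointer.
 */
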
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if (addr <= high_memory)
		return;
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
		addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if (p->flags >> 20)
		ioremap_change_attr(p->phys_addr, p->size, 0);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}