x86: cleanup ioremap includes
[linux-2.6/mini2440.git] arch/x86/mm/ioremap_32.c

/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			       (unsigned long) addr + size, phys_addr, prot)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
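
/*
 * Usage sketch (not part of this file): drivers normally reach this code
 * through the ioremap()/ioremap_nocache() wrappers rather than calling
 * __ioremap() directly.  BAR_PHYS, BAR_LEN and REG_OFF below are made-up
 * names used only for illustration:
 *
 *	void __iomem *regs = ioremap(BAR_PHYS, BAR_LEN);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(readl(regs + REG_OFF) | 0x1, regs + REG_OFF);
 *	iounmap(regs);
 */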

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);

	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
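
/*
 * Usage sketch (hypothetical PCI driver code, not part of this file;
 * pdev and CTRL_REG are assumed names): map BAR 0 uncached, poke a
 * register, then release the mapping as required above.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + CTRL_REG);
 *	iounmap(regs);
 */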

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 get_vm_area_size(p) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
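
/*
 * Illustration only (priv->regs is an assumed field): because the same
 * pointer must not be passed to iounmap() twice, teardown paths commonly
 * clear their copy after the call:
 *
 *	if (priv->regs) {
 *		iounmap(priv->regs);
 *		priv->regs = NULL;
 *	}
 */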

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
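
/*
 * Usage sketch (boot-time code only; phys and buf are assumed names):
 * early_ioremap()/early_iounmap() pairs must be released in reverse
 * order of creation because of the nesting scheme above.
 *
 *	void *p = early_ioremap(phys, sizeof(u16));
 *
 *	if (p) {
 *		memcpy(&buf, p, sizeof(u16));
 *		early_iounmap(p, sizeof(u16));
 *	}
 */
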
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}