x86: no CPA on iounmap
[linux-2.6/x86.git] arch/x86/mm/ioremap.c

/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		/*
		 * Sanity check: Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area, which is the
		 * PCI BIOS area.
		 */
		if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    end < (BIOS_END >> PAGE_SHIFT))
			continue;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
		if (page_is_ram(pfn) && pfn_valid(pfn) &&
		    !PageReserved(pfn_to_page(pfn)))
			return NULL;
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		remove_vm_area((void *)(vaddr & PAGE_MASK));
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);

void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

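/*
 * Illustrative sketch only, not part of the original file: a typical
 * driver-side use of ioremap_nocache()/iounmap() as documented above.
 * The function name, the physical base address passed in and the 0x10
 * status-register offset are hypothetical; __maybe_unused just keeps the
 * unused example from warning.
 */
static u32 __maybe_unused example_read_device_status(unsigned long phys_base)
{
	void __iomem *regs;
	u32 status;

	/* Map one page of MMIO registers uncached. */
	regs = ioremap_nocache(phys_base, PAGE_SIZE);
	if (!regs)
		return 0;

	/* Access the returned cookie only through the mmio helpers. */
	status = readl(regs + 0x10);

	/* Every successful ioremap_*() must be paired with iounmap(). */
	iounmap(regs);

	return status;
}
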
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	paravirt_release_pt(__pa(pgd) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

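/*
 * Illustrative sketch only, not part of the original file: how boot-time
 * code uses early_ioremap()/early_iounmap() to peek at firmware data
 * before the normal ioremap machinery is available.  The function name
 * and the caller-supplied physical address and length are hypothetical;
 * __maybe_unused just keeps the unused example from warning.
 */
static void __init __maybe_unused example_copy_boot_table(unsigned long phys,
							  size_t len, void *dst)
{
	void *virt;

	/* Temporarily map the table through the boot-time fixmap slots. */
	virt = early_ioremap(phys, len);
	if (!virt)
		return;

	/* The mapping is cached normal memory, so a plain copy is fine. */
	memcpy(dst, virt, len);

	/* Mappings nest, so unmap in reverse order of mapping. */
	early_iounmap(virt, len);
}
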
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */