x86: Use the generic page_is_ram()
arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
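/*
 * Editor's illustrative sketch (not part of the original file): when a RAM
 * page that is also covered by the kernel's direct mapping gets an uncached
 * ioremap alias, the direct-map attribute has to be changed to match, or the
 * CPU can see two conflicting memory types for the same physical page.
 * A caller would sync it roughly like this (vaddr is hypothetical and
 * assumed to lie inside the direct mapping):
 *
 *	if (ioremap_change_attr(vaddr, PAGE_SIZE, _PAGE_CACHE_UC))
 *		printk(KERN_WARNING "direct map attr change failed\n");
 *
 * In this file that job is done via kernel_map_sync_memtype() from
 * __ioremap_caller() below.
 */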
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}
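/*
 * Editor's illustrative worked example (not part of the original file) of
 * the alignment fixup above, with hypothetical numbers and 4K pages:
 *
 *	phys_addr = 0xfed00004, size = 0x10
 *	last_addr = 0xfed00013
 *	offset    = phys_addr & ~PAGE_MASK = 0x4
 *	phys_addr &= PAGE_MASK            -> 0xfed00000
 *	size      = PAGE_ALIGN(last_addr + 1) - phys_addr
 *	          = 0xfed01000 - 0xfed00000 = 0x1000
 *
 * The caller gets vaddr + 0x4 back, so the unaligned request stays
 * transparent to it, as the NOTE above promises.
 */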
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
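/*
 * Editor's illustrative usage sketch (not part of the original file): a
 * typical PCI driver maps a register BAR, pokes it through the mmio
 * helpers, and tears the mapping down again. pdev and CTRL_REG are
 * hypothetical, and error handling is abbreviated:
 *
 *	void __iomem *regs;
 *	u32 val;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + CTRL_REG);
 *	val = readl(regs + CTRL_REG);
 *	iounmap(regs);
 *
 * The returned cookie must only be accessed via readl/writel and friends,
 * never dereferenced as a plain pointer.
 */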
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
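/*
 * Editor's illustrative sketch (not part of the original file): write
 * combining is typically used for framebuffer-style memory where streaming
 * writes dominate. fb_phys and fb_len are hypothetical:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 *	if (!fb)
 *		fb = ioremap_nocache(fb_phys, fb_len);
 *
 * The explicit fallback above mirrors what ioremap_wc() itself does when
 * PAT is disabled.
 */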
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
static void __iomem *ioremap_default(resource_size_t phys_addr,
				     unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size,
			      _PAGE_CACHE_WB, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
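/*
 * Editor's illustrative sketch (not part of the original file) of how a
 * /dev/mem style reader would pair the two helpers above; buf, count and
 * p are hypothetical:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, count)) {
 *		unxlate_dev_mem_ptr(p, ptr);
 *		return -EFAULT;
 *	}
 *	unxlate_dev_mem_ptr(p, ptr);
 *
 * For RAM pages xlate_dev_mem_ptr() is just __va() and the unxlate side is
 * a no-op; only non-RAM pages take the ioremap/iounmap round trip.
 */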
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
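/*
 * Editor's illustrative note (not part of the original file): fixmap slots
 * grow downward from FIXADDR_TOP, so the virtual address of an index is
 *
 *	__fix_to_virt(idx) == FIXADDR_TOP - (idx << PAGE_SHIFT)
 *
 * which is why __early_ioremap() below walks idx downward (--idx) while the
 * physical address walks upward: consecutive pages of one mapping remain
 * virtually contiguous and ascending.
 */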
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
		       (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}
/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
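/*
 * Editor's illustrative sketch (not part of the original file): the early
 * variants exist for the window during boot before ioremap() proper works,
 * e.g. to peek at a firmware table. table_phys and its layout are
 * hypothetical, and every early mapping must be undone with
 * early_iounmap() using the same size:
 *
 *	void __iomem *p = early_ioremap(table_phys, 64);
 *
 *	if (p) {
 *		u32 sig = readl(p);
 *		early_iounmap(p, 64);
 *	}
 */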
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot\n",
		       addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size %08lx\n",
		       addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}