x86: don't define __this_fixmap_does_not_exist()
[firewire-audio.git] arch/x86/mm/ioremap.c (blob 96786ef2c9a9d4247942cfb6b0f71014e7d5497c)
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);

bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (!phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif
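
/*
 * Editor's illustration, not part of the original file: a minimal sketch of
 * how the two branches of __phys_addr() get exercised.  The function and
 * variable names below are hypothetical examples.
 */
#if 0
static void __init phys_addr_example(void)
{
	/* On 64-bit, a kernel-text symbol sits above __START_KERNEL_map ... */
	unsigned long text_pa = __phys_addr((unsigned long)&phys_addr_example);

	/* ... while a direct-mapped allocation sits above PAGE_OFFSET. */
	void *buf = kmalloc(64, GFP_KERNEL);
	unsigned long buf_pa = buf ? __phys_addr((unsigned long)buf) : 0;

	printk(KERN_INFO "example: text pa=%#lx buf pa=%#lx\n", text_pa, buf_pa);
	kfree(buf);
}
#endif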

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fallback to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
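
/*
 * Editor's sketch, not part of the original file: typical driver usage of
 * ioremap_nocache()/iounmap() with the mmio accessors.  The base address,
 * size and register offset below are hypothetical.
 */
#if 0
#define EXAMPLE_MMIO_BASE	0xfeb00000UL	/* hypothetical BAR address */
#define EXAMPLE_MMIO_SIZE	0x1000UL
#define EXAMPLE_REG_CTRL	0x04		/* hypothetical register offset */

static int example_mmio_setup(void)
{
	void __iomem *regs;

	regs = ioremap_nocache(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!regs)
		return -ENOMEM;

	/* Always go through the mmio helpers, never plain dereferences. */
	writel(0x1, regs + EXAMPLE_REG_CTRL);
	(void)readl(regs + EXAMPLE_REG_CTRL);

	iounmap(regs);
	return 0;
}
#endif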

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
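
/*
 * Editor's sketch, not part of the original file: a write-combined mapping
 * of a hypothetical framebuffer aperture.  When PAT is disabled, the call
 * silently degrades to an uncached mapping via ioremap_nocache().
 */
#if 0
static void __iomem *example_map_framebuffer(resource_size_t fb_base,
					     unsigned long fb_len)
{
	void __iomem *fb = ioremap_wc(fb_base, fb_len);

	if (fb)
		memset_io(fb, 0, fb_len);	/* clear the screen once */
	return fb;
}
#endif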

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
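
/*
 * Editor's sketch, not part of the original file: how a /dev/mem style
 * reader would pair xlate_dev_mem_ptr() with unxlate_dev_mem_ptr().  The
 * function and parameter names are hypothetical.
 */
#if 0
static int example_read_phys_byte(unsigned long phys, u8 *out)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;

	*out = *(u8 *)ptr;
	unxlate_dev_mem_ptr(phys, ptr);
	return 0;
}
#endif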

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
		       phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
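
/*
 * Editor's sketch, not part of the original file: boot-time inspection of a
 * firmware table with early_memremap()/early_iounmap(), before the normal
 * ioremap machinery is available.  Address, length and signature byte are
 * hypothetical.
 */
#if 0
static void __init example_early_probe(void)
{
	unsigned long table_phys = 0xf0000UL;	/* hypothetical table address */
	unsigned long table_len = 64;
	void __iomem *table;

	table = early_memremap(table_phys, table_len);
	if (!table)
		return;

	if (readb(table) == 0x55)		/* hypothetical signature byte */
		printk(KERN_INFO "example: signature found\n");

	/* Each early mapping must be torn down with the same size. */
	early_iounmap(table, table_len);
}
#endif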

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
		       addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		       addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}