[PATCH] Generic ioremap_page_range: sh conversion
/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
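	/*
	 * Worked example (hypothetical values): with 32-bit longs,
	 * phys_addr = 0xffff0000 and size = 0x20000 would give
	 * last_addr = 0x1000ffff truncated to 0x0000ffff, which is
	 * below phys_addr, so the request is rejected as wraparound.
	 */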
	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return (void __iomem *)phys_to_virt(phys_addr);

	/*
	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
	 * mapped at the end of the address space (typically 0xfd000000)
	 * in a non-translatable area, so mapping through page tables for
	 * this area is not only pointless, but also fundamentally
	 * broken. Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
		return (void __iomem *)phys_addr;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
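	/*
	 * Worked example (hypothetical values): with 4 KiB pages,
	 * phys_addr = 0xfd000123 and size = 0x10 give last_addr =
	 * 0xfd000132, hence offset = 0x123, phys_addr = 0xfd000000 and
	 * size = PAGE_ALIGN(0xfd000133) - 0xfd000000 = 0x1000, i.e. one
	 * full page covering the requested window. The caller gets the
	 * mapped base plus offset back, so the sub-page detail is hidden.
	 */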
	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_32BIT
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr		+= mapped;
			phys_addr	+= mapped;
			size		-= mapped;
		}
	}
#endif
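	/*
	 * Worked example (hypothetical values): for a 20 MiB request
	 * (size = 0x1400000), pmb_remap() might cover the first 16 MiB
	 * (mapped = 0x1000000) with a pre-faulted PMB entry; addr,
	 * phys_addr and size are then advanced so that only the
	 * remaining 4 MiB falls through to ioremap_page_range() below.
	 */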
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (likely(size))
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);
void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
		return;

#ifdef CONFIG_32BIT
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *
	 *	-- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
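
For reference, a minimal sketch of how a driver would consume this
interface. It is illustrative only: DEV_PHYS_BASE, DEV_WINDOW_SIZE,
DEV_STATUS_REG and dev_probe_sketch() are hypothetical placeholders, and
the calls go through the generic ioremap()/iounmap() wrappers that
resolve to the functions above.

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEV_PHYS_BASE	0xfd000000UL	/* hypothetical device window */
#define DEV_WINDOW_SIZE	0x1000UL	/* one page of registers */
#define DEV_STATUS_REG	0x10		/* hypothetical register offset */

static int dev_probe_sketch(void)
{
	void __iomem *regs;
	u32 status;

	/* Establish an uncached kernel mapping of the device window. */
	regs = ioremap(DEV_PHYS_BASE, DEV_WINDOW_SIZE);
	if (!regs)
		return -ENOMEM;

	/* MMIO accessors take the __iomem cookie plus a byte offset. */
	status = readl(regs + DEV_STATUS_REG);

	/* Tear the mapping down when we are finished with the device. */
	iounmap(regs);

	return status ? 0 : -ENODEV;
}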