/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
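
/*
 * The remap_area_*() helpers below fill in the kernel page tables for
 * an I/O mapping one level at a time: remap_area_pages() walks the PGD
 * and allocates the PUD/PMD levels, remap_area_pmd() covers one PGD
 * entry's worth of PMDs, and remap_area_pte() writes the individual
 * PTEs.
 */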
static inline void remap_area_pte(pte_t *pte, unsigned long address,
				  unsigned long size, unsigned long phys_addr,
				  unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
				   _PAGE_DIRTY | _PAGE_ACCESSED |
				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD |
				   flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	/* The "address &&" test also stops the walk if address wraps to 0. */
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
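
/*
 * For example (illustrative values, assuming 4 KB pages): asking
 * remap_area_pte() above to map three pages at phys_addr 0xfd000000
 * writes three consecutive PTEs for pfns 0xfd000, 0xfd001 and 0xfd002,
 * each carrying the pgprot assembled from the hardwired flags plus the
 * caller-supplied ones.
 */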
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				 unsigned long size, unsigned long phys_addr,
				 unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
int remap_area_pages(unsigned long address, unsigned long phys_addr,
		     unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	/* Flush caches before the PTEs change, and the TLB once they have. */
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;

		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return (void __iomem *)phys_to_virt(phys_addr);

	/*
	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
	 * mapped at the end of the address space (typically 0xfd000000)
	 * in a non-translatable area, so mapping through page tables for
	 * this area is not only pointless, but also fundamentally
	 * broken. Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
		return (void __iomem *)phys_addr;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
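
	/*
	 * Worked example (illustrative numbers, assuming 4 KB pages): for
	 * phys_addr 0xfd800004 and size 0x10, last_addr is 0xfd800013,
	 * offset becomes 0x004, phys_addr is rounded down to 0xfd800000,
	 * and size is rounded up to one full page. The caller gets back
	 * the mapped base plus offset, so the unalignment stays invisible
	 * to it.
	 */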
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_32BIT
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr		+= mapped;
			phys_addr	+= mapped;
			size		-= mapped;
		}
	}
#endif
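
	/*
	 * Illustrative split (assuming 16 MB PMB sections): a 20 MB
	 * (0x1400000) request would have its first 16 MB covered by a
	 * PMB entry, with the remaining 4 MB falling through to the
	 * page-table path below.
	 */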
	if (likely(size))
		if (remap_area_pages(addr, phys_addr, size, flags)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);
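
/*
 * Typical driver-side usage (a hedged sketch; the base address and
 * register offsets are made up for illustration):
 *
 *	void __iomem *regs = ioremap(0xfd800000, PAGE_SIZE);
 *	if (regs) {
 *		u32 val = readl(regs + 0x04);
 *		writel(val | 0x1, regs + 0x04);
 *		iounmap(regs);
 *	}
 */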
void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/* P1/P2 and PCI window addresses were never page-table mapped. */
	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
		return;
#ifdef CONFIG_32BIT
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);