/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
	                           | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
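
/*
 * Note on the walk above (explanatory comment, not from the original
 * source): remap_area_pages() advances in PGDIR_SIZE steps,
 * remap_area_pmd() in PMD_SIZE steps, and remap_area_pte() fills one
 * PTE per PAGE_SIZE, so the three nested loops together cover a region
 * of any page-aligned size.  The `address &&` half of each loop
 * condition terminates the walk if the address wraps to zero at the
 * very top of the address space.
 */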

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
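
/*
 * For illustration (not from the original source): ~0x1fffffffULL
 * masks off the low 29 bits, so IS_LOW512() is true exactly for
 * physical addresses below 512 MB; e.g. IS_LOW512(0x1fc00000) is
 * true while IS_LOW512(0x20000000) is false.  That is the window the
 * unmapped, uncached KSEG1 segment can reach without page tables.
 */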

void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);
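
	/*
	 * For illustration (not from the original source): on a 32-bit
	 * kernel CKSEG1ADDR() just folds the physical address into the
	 * KSEG1 window, e.g. CKSEG1ADDR(0x1fc00000) yields 0xbfc00000,
	 * so this fast path needs no page tables and no vm_struct
	 * bookkeeping.
	 */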

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
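
	/*
	 * Worked example (not from the original source), assuming 4 kB
	 * pages: for phys_addr = 0x11230104 and size = 0x300, last_addr
	 * is 0x11230403, offset becomes 0x104, phys_addr is rounded down
	 * to 0x11230000 and size rounded up to 0x1000 -- one full page is
	 * mapped, and the caller's offset is re-added on return below.
	 */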

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
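
/*
 * Usage sketch (not part of this file; the base address 0x1f000000,
 * the size and the register offset are hypothetical):
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0x1f000000, 0x1000, _CACHE_UNCACHED);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);	poke a device register
 *	__iounmap(regs);
 *
 * Since 0x1f000000 lies in the low 512 MB and the mapping is uncached,
 * this particular call returns a KSEG1 address, and __iounmap() does
 * nothing beyond the IS_KSEG1() check.
 */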