/*
 *  linux/arch/m32r/mm/ioremap.c
 *
 *  Copyright (c) 2001, 2002  Hiroyuki Kondo
 *
 *  Taken from mips version.
 *    (C) Copyright 1995 1996 Linus Torvalds
 *    (C) Copyright 2001 Ralf Baechle
 */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static inline void
remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
				   | _PAGE_WRITE | flags);

	/* Clamp the range to the PTE table covered by this pmd entry. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
static inline int
remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Clamp the range to what this pgd entry covers. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
	unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))
void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map objects in the low 512mb of address space using KSEG1, otherwise
	 * map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
		return (void *) KSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
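
/*
 * Worked example of the alignment handling above (illustrative only, assuming
 * PAGE_SIZE == 4096 and a made-up physical address outside the low 512MB):
 * __ioremap(0x40000123, 0x10, 0) gives last_addr = 0x40000132, so
 * offset = 0x123, phys_addr is rounded down to 0x40000000, size becomes
 * PAGE_ALIGN(0x40000133) - 0x40000000 = 0x1000 (one page), and the caller
 * gets back the vm area's base address plus 0x123.
 */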
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)

void iounmap(volatile void __iomem *addr)
{
	if (!IS_KSEG1(addr))
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}