linux-2.6/x86.git: arch/x86/mm/ioremap_32.c

/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
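
	/*
	 * The mapping below is kernel read/write and pre-marked
	 * dirty/accessed; the caller's flags layer cache attributes on
	 * top (for example, ioremap_nocache() below passes _PAGE_PCD to
	 * disable caching).
	 */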
	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
			| _PAGE_ACCESSED | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
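
	/*
	 * Worked example (illustrative): for phys_addr = 0x12345 and
	 * size = 0x100, last_addr = 0x12444, so offset = 0x345,
	 * phys_addr rounds down to 0x12000, and size becomes
	 * PAGE_ALIGN(0x12445) - 0x12000 = 0x13000 - 0x12000 = one page.
	 */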

	/*
	 * Ok, go for it..
	 */
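	/*
	 * The caching flags are stashed in the high bits of area->flags
	 * (flags << 20) so that iounmap() can later tell whether the
	 * direct mapping's attributes need to be reset; see the
	 * (p->flags >> 20) test there.
	 */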
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			(unsigned long) addr + size, phys_addr, prot)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
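
/*
 * Note (not part of the original file): in this era's <asm/io.h>, the
 * plain ioremap() used by most drivers is a thin inline wrapper that
 * calls __ioremap(offset, size, 0), i.e. no extra PTE flags and
 * therefore a cacheable mapping.
 */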

/**
 * ioremap_nocache	-	map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
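
		/*
		 * Illustrative overflow case: with phys_addr masked to
		 * 0xfffff000 and last_addr = 0xffffffff, PAGE_ALIGN()
		 * wraps last_addr to 0, yet (0 - 0xfffff000) modulo 2^32
		 * is 0x1000, i.e. exactly one page.
		 */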

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
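
/*
 * Usage sketch (not part of the original file): a hypothetical driver
 * mapping a device's control registers uncached and accessing them with
 * the mmio helpers. The physical address, size, and register offsets are
 * made-up example values.
 */
#if 0
static void __iomem *example_regs;

static int example_probe(void)
{
	example_regs = ioremap_nocache(0xfebf0000, 0x1000);	/* hypothetical MMIO BAR */
	if (!example_regs)
		return -ENOMEM;

	writel(0x1, example_regs + 0x04);	/* hypothetical enable register */
	(void) readl(example_regs + 0x04);	/* read back to post the write */
	return 0;
}

static void example_remove(void)
{
	iounmap(example_regs);	/* must pair with the ioremap_nocache() above */
	example_regs = NULL;
}
#endif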

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;
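
	/*
	 * __ioremap() returned area->addr plus the sub-page offset of the
	 * original physical address, so mask the pointer back down to its
	 * page boundary before looking it up in the vmlist.
	 */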
	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 get_vm_area_size(p) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return phys_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;	/* +1: last_addr is inclusive */

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void *) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
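
/*
 * Usage sketch (not part of the original file): bt_ioremap() provides a
 * temporary boot-time mapping through the FIX_BTMAP fixmap slots, for
 * __init code that runs before the regular ioremap() machinery is
 * usable. The physical address below is a made-up example.
 */
#if 0
static void __init example_early_read(void)
{
	void *tbl = bt_ioremap(0x000f0000, 64);	/* hypothetical firmware table */
	if (tbl) {
		/* ... inspect the table while the mapping is live ... */
		bt_iounmap(tbl, 64);	/* size must match the bt_ioremap() call */
	}
}
#endif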

void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;	/* round the span up to whole pages */

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}