x86: cleanup ioremap includes
arch/x86/mm/ioremap_64.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
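/*
 * Editor's note, not part of the original file: a minimal sketch of what
 * __phys_addr() inverts. x86_64 has two kernel virtual views of RAM: the
 * direct mapping starting at PAGE_OFFSET (what __va() returns) and the
 * kernel text mapping starting at __START_KERNEL_map, shifted by
 * phys_base. The helper below is hypothetical and compiled out.
 */
#if 0
static void example_phys_addr_roundtrip(unsigned long paddr)
{
        /* __va() adds PAGE_OFFSET; __phys_addr() subtracts it again */
        void *vaddr = __va(paddr);

        BUG_ON(__phys_addr((unsigned long)vaddr) != paddr);
}
#endif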
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
                               unsigned long flags)
{
        int err = 0;
        if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
                unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long vaddr = (unsigned long) __va(phys_addr);
                int level;

                /*
                 * If there is no identity map for this address,
                 * change_page_attr_addr is unnecessary.
                 */
                if (!lookup_address(vaddr, &level))
                        return err;
                /*
                 * Must use an address here and not a struct page because
                 * the phys addr can be in a hole between nodes and not
                 * have a memmap entry.
                 */
                err = change_page_attr_addr(vaddr, npages,
                                            MAKE_GLOBAL(__PAGE_KERNEL | flags));
                if (!err)
                        global_flush_tlb();
        }
        return err;
}
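/*
 * Editor's note, not part of the original file: a sketch of the aliasing
 * problem ioremap_change_attr() solves. If a page of RAM covered by the
 * kernel direct mapping is also ioremap()ed uncached, the same physical
 * page would be reachable with conflicting cache attributes unless the
 * direct mapping alias is adjusted as well. The address parameter is
 * hypothetical and the block is compiled out.
 */
#if 0
static void example_attribute_alias(unsigned long phys)
{
        void *cached_alias = __va(phys);        /* direct mapping, cached */
        void __iomem *uncached_alias =
                ioremap_nocache(phys, PAGE_SIZE);

        /*
         * __ioremap() called ioremap_change_attr() for us, so
         * cached_alias now carries the same uncached attributes
         * as uncached_alias.
         */
        iounmap(uncached_alias);
        (void)cached_alias;
}
#endif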
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                        unsigned long flags)
{
        void *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;
        pgprot_t pgprot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
        /*
         * Mappings have to be page-aligned.
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = area->addr;
        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                               phys_addr, pgprot)) {
                remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
                return NULL;
        }
        if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
                area->flags &= 0xffffff;
                vunmap(addr);
                return NULL;
        }
        return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
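/*
 * Editor's note, not part of the original file: the NOTE above in
 * practice. __ioremap() masks the physical address down to a page
 * boundary for the mapping itself, but adds the sub-page offset back
 * into the returned pointer. The device address below is made up and
 * the block is compiled out.
 */
#if 0
static void __iomem *example_unaligned_map(void)
{
        /*
         * 0xfebc1004 is not page-aligned; the low 0x004 bytes of
         * offset survive in the returned cookie.
         */
        return __ioremap(0xfebc1004UL, 8, 0);
}
#endif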
/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);
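/*
 * Editor's note, not part of the original file: the usual driver pattern
 * for the exports above. MMIO base, size and register offset are
 * hypothetical; the block is compiled out.
 */
#if 0
static int example_read_device_status(u32 *status)
{
        void __iomem *regs;

        regs = ioremap_nocache(0xfebc0000UL, PAGE_SIZE); /* hypothetical BAR */
        if (!regs)
                return -ENOMEM;

        *status = readl(regs + 0x04);   /* hypothetical status register */

        iounmap(regs);  /* each successful ioremap needs exactly one iounmap */
        return 0;
}
#endif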
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if (addr <= high_memory)
                return;
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there
         * isn't another iounmap for the same address in parallel. Reuse
         * of the virtual address is prevented by leaving it in the
         * global lists until we're done with it. cpa takes care of the
         * direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Reset the direct mapping. Can block. */
        if (p->flags >> 20)
                ioremap_change_attr(p->phys_addr, p->size, 0);

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);