[POWERPC] Add spinlock to request_phb_iospace()
arch/powerpc/mm/pgtable_64.c
/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/firmware.h>

#include "mmu_decl.h"
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;
/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (mem_init_done) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							__pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}
	return 0;
}
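
/*
 * Usage sketch (illustrative, not part of the original file): callers map
 * one page at a time, with the protection bits OR'd into the low flag
 * word, e.g. for a single uncached, guarded page:
 *
 *	unsigned long flags = pgprot_val(PAGE_KERNEL) |
 *			      _PAGE_NO_CACHE | _PAGE_GUARDED;
 *
 *	if (map_io_page(ea, pa, flags))
 *		return NULL;
 */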
static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
			    unsigned long ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}
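
/*
 * Worked example (illustrative): for addr = 0xf2001004 and size = 0x10,
 * __ioremap() below passes pa = 0xf2001000 and a page-aligned ea; the
 * loop maps one full page and the returned cookie is ea + 0x4, so the
 * sub-page offset of the original physical address is preserved.
 */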
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if ((size == 0) || (pa == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (!ret)
			im_free(area->addr);
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;
	}
	return ret;
}
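
/*
 * Usage sketch (illustrative, hypothetical bus address): a caller that
 * needs non-default attributes goes through __ioremap() directly and
 * tears the mapping down with iounmap():
 *
 *	void __iomem *regs = __ioremap(0xf2000000UL, 0x1000,
 *				       _PAGE_NO_CACHE | _PAGE_GUARDED);
 *	if (regs) {
 *		u32 id = in_be32(regs);
 *		iounmap(regs);
 *	}
 */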
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags);
	return __ioremap(addr, size, flags);
}
void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags);
	return __ioremap(addr, size, flags);
}
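
/*
 * Sketch (assumption, not from this file): a platform can interpose on
 * every mapping by filling in the machdep hooks from its setup code,
 * e.g. with hypothetical functions:
 *
 *	ppc_md.ioremap = my_platform_ioremap;
 *	ppc_md.iounmap = my_platform_iounmap;
 *
 * where my_platform_ioremap() has the same signature as __ioremap()
 * above; both ioremap() and ioremap_flags() then route through it.
 */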
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}

	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
		return 1;
	}
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}

	return 0;
}
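
/*
 * Usage sketch (illustrative, hypothetical values): PHB hotplug-style
 * callers place a region at a fixed, pre-reserved virtual address and
 * check the 0-on-success/1-on-error convention:
 *
 *	if (__ioremap_explicit(phys, io_virt, 0x10000,
 *			       _PAGE_NO_CACHE | _PAGE_GUARDED))
 *		return -ENOMEM;
 */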
/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX	what about calls before mem_init_done (ie python_countermeasures())
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	im_free(addr);
}
void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}
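
/*
 * Usage sketch (illustrative, hypothetical device and register offsets):
 * iounmap() is the tear-down half of every successful ioremap():
 *
 *	void __iomem *regs = ioremap(0xf8004000UL, 0x100);
 *	if (regs) {
 *		out_be32(regs + 0x10, 0x1);
 *		iounmap(regs);
 *	}
 */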
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	/* Each iounmap() removes a record, so re-query until none remain */
	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size,
				IM_REGION_SUPERSET);
	}

	return 0;
}
int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region
	 */
	area = im_get_area(addr, size,
			   IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
			       __FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:
	iounmap(area->addr);
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}
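
/*
 * Behaviour sketch (illustrative): if a range was mapped as several
 * smaller regions, the IM_REGION_EXISTS | IM_REGION_SUBSET lookup above
 * misses and iounmap_subset_regions() peels the pieces off, one
 * IM_REGION_SUPERSET hit per loop iteration, until im_get_area() returns
 * NULL; only a range with no record at all produces the error printk.
 */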
EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
static DEFINE_SPINLOCK(phb_io_lock);

void __iomem * reserve_phb_iospace(unsigned long size)
{
	void __iomem *virt_addr;

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	spin_lock(&phb_io_lock);
	virt_addr = (void __iomem *) phbs_io_bot;
	phbs_io_bot += size;
	spin_unlock(&phb_io_lock);

	return virt_addr;
}
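
/*
 * Usage sketch (illustrative, hypothetical 64k window): PHB setup code
 * carves its IO space out of the fixed PHBS_IO_BASE..IMALLOC_BASE range:
 *
 *	void __iomem *io_base = reserve_phb_iospace(0x10000);
 *
 * The phb_io_lock makes concurrent reservations (e.g. during PHB
 * hotplug) safe; note the overflow check still panics rather than
 * failing gracefully.
 */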