arch/powerpc/mm/init_64.c

/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

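/*
 * memstart_addr is the physical address at which memory begins; ~0 acts
 * as a "not yet known" sentinel until early boot code fills it in.
 * kernstart_addr records the physical address the kernel was loaded at,
 * which matters when the kernel is relocatable.
 */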
phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;

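/*
 * Free the memory used by the kernel's __init text and data.  Each page
 * is first poisoned with POISON_FREE_INITMEM so that stale references
 * into freed init code fail loudly, then un-reserved, given a fresh
 * reference count, and handed back to the page allocator.
 */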
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)__init_begin;
        for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk("Freeing unused kernel memory: %luk freed\n",
               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}
#endif

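/*
 * Constructors for the page-table caches below: the slab allocator runs
 * these when it constructs a new object, so page directories and middle
 * directories start out fully cleared, with no valid entries.
 */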
static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, PMD_TABLE_SIZE);
}

static const unsigned int pgtable_cache_size[2] = {
        PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
        "pgd_cache", "pmd_cache",
#else
        "pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need an extra cache per hugepagesize, initialized in
 * hugetlbpage.c.  We can't put them into the tables above, because
 * HPAGE_SHIFT is not a compile-time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

void pgtable_cache_init(void)
{
        pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0], PGD_TABLE_SIZE,
                                             PGD_TABLE_SIZE, SLAB_PANIC, pgd_ctor);
        pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1], PMD_TABLE_SIZE,
                                             PMD_TABLE_SIZE, SLAB_PANIC, pmd_ctor);
}

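/*
 * Illustrative sketch, not part of this file: with the caches above in
 * place, the pgd/pmd allocators in the powerpc pgalloc headers reduce to
 * plain slab allocations, roughly:
 *
 *      pgd_t *pgd_alloc(struct mm_struct *mm)
 *      {
 *              return kmem_cache_alloc(pgtable_cache[0], GFP_KERNEL);
 *      }
 *
 * SLAB_PANIC means boot fails outright if either cache cannot be
 * created, so callers never see a NULL cache pointer.
 */
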
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

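/*
 * The arithmetic above: the byte offset into the vmemmap divided by
 * sizeof(struct page) is the pfn whose struct page sits at that address,
 * and masking with PAGE_SECTION_MASK rounds that pfn down to the first
 * pfn of its memory section.
 */
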
/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(vmemmap_section_start(start)))
                        return 1;

        return 0;
}

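/*
 * The loop stride is one section's worth of virtual map: every
 * PAGES_PER_SECTION * sizeof(struct page) bytes of vmemmap describe one
 * memory section, so probing pfn_valid() at each section start is enough
 * to detect any initialised section overlapping this mapping page.
 */
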
/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        /* Create a PTE encoding without page size */
        unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
                _PAGE_KERNEL_RW;

        /* PTEs only contain page size encodings up to 32M */
        BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

        /* Encode the size in the PTE */
        flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

        /* For each PTE for that area, map things. Note that we don't
         * increment phys because all PTEs are of the large size and
         * thus must have the low bits clear
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
                BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
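/*
 * Hash MMU variant: htab_bolt_mapping() installs bolted hash page table
 * entries covering the whole range, so vmemmap accesses never take a
 * hash fault.  A negative return means the bolted mapping could not be
 * created, which is unrecoverable this early, hence the BUG_ON().
 */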
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        int mapped = htab_bolt_mapping(start, start + page_size, phys,
                                       PAGE_KERNEL, mmu_vmemmap_psize,
                                       mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

int __meminit vmemmap_populate(struct page *start_page,
                               unsigned long nr_pages, int node)
{
        unsigned long start = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + nr_pages);
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
                 start_page, nr_pages, node);
        pr_debug(" -> map %lx..%lx\n", start, end);

        for (; start < end; start += page_size) {
                void *p;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                pr_debug("      * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                vmemmap_create_mapping(start, page_size, __pa(p));
        }

        return 0;
}

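/*
 * Illustrative call path, not part of this file: with SPARSEMEM_VMEMMAP
 * enabled, sparse_mem_map_populate() in mm/sparse-vmemmap.c calls
 * vmemmap_populate() for one section's worth of struct pages at a time,
 * roughly:
 *
 *      struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
 *      if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
 *              return NULL;
 *
 * The vmemmap_populated() check above keeps those per-section calls from
 * re-mapping ranges already covered by an earlier, larger mapping page.
 */
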
#endif /* CONFIG_SPARSEMEM_VMEMMAP */