/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
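/*
 * Amount of the kernel virtual address range reserved for vmalloc()
 * and ioremap(): 128 << 20 bytes, i.e. 128MB, carved out above the
 * direct lowmem mapping.
 */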
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		if (page_table != pte_offset_kernel(pmd, 0))
			BUG();

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd))
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (pmd_none(*pmd))
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}
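/*
 * The pfn range 0x70000-0x7003F (256KB of physical memory starting at
 * 1792MB) triggers the Pentium Pro erratum detected by
 * ppro_with_ram_bug() in mem_init() below; pages in that window must
 * stay reserved on affected steppings.
 */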
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}
extern int is_available_memory(efi_memory_desc_t *);

int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;
		void *p;

		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
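/*
 * Note the rounding above in both the EFI and e820 branches: a region's
 * start pfn is rounded up and its end pfn rounded down, so a page only
 * counts as RAM when the region covers it completely.
 */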
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
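/*
 * Caching kmap_pte pays off because the fixmap ptes live in one
 * physically contiguous page table (see the NOTE above
 * page_table_range_init), so kmap_atomic() can reach any kmap slot by
 * pointer arithmetic on kmap_pte instead of a full page-table walk.
 */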
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
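/*
 * Give one highmem page to the buddy allocator: clear its reserved
 * bit, set the refcount to 1, and let __free_page() drop that last
 * reference so the page lands on the free lists.
 */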
void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		set_page_count(page, 1);
		__free_page(page);
		totalhigh_pages++;
	} else
		SetPageReserved(page);
}
#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */
unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif
static void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
	int i;
	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

	/* Enable PSE if available */
	if (cpu_has_pse) {
		set_in_cr4(X86_CR4_PSE);
	}

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
#endif
}
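/*
 * A note on the page_table_range_init(vaddr, 0, pgd_base) call in
 * pagetable_init() above: the fixmap region extends to the top of the
 * 32-bit address space, so the end address wraps around to 0 and the
 * (vaddr != end) loop condition stops the walk exactly at the 4GB
 * boundary.
 */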
#ifdef CONFIG_SOFTWARE_SUSPEND
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif
void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}
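/*
 * In the PAE case above the low entries are not simply cleared: each is
 * set to 1 + __pa(empty_zero_page), i.e. marked present but pointing at
 * the shared all-zero page, mirroring the zero-page initialization of
 * the top-level table in pagetable_init().
 */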
static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
void __init noexec_setup(const char *str)
{
	if (!strncmp(str, "on", 2) && cpu_has_nx) {
		__supported_pte_mask |= _PAGE_NX;
		disable_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
}
int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
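/*
 * Bit 20 of EDX from CPUID leaf 0x80000001 advertises the NX feature;
 * setting EFER.NX via the rdmsr/wrmsr pair above is what actually arms
 * the no-execute bit in PAE page table entries.
 */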
/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	__flush_tlb_all();
out:
	return ret;
}

#endif
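/*
 * _PAGE_BIT_NX is bit 63 of a 64-bit PAE pte. The upper word of the pte
 * is kept in pte_high, so set_kernel_exec() manipulates NX as bit 31
 * there, hence the 1 << (_PAGE_BIT_NX - 32) shifts.
 */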
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}
static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
	num_physpages = highend_pfn;
#else
	num_physpages = max_low_pfn;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
}
static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}
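/*
 * Slab caches for top- and middle-level page tables. The pmd cache is
 * only needed when PTRS_PER_PMD > 1, i.e. with PAE, where the middle
 * level really exists; without PAE it is folded into the pgd.
 */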
kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");
	}
	pgd_cache = kmem_cache_create("pgd",
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
				0,
				pgd_ctor,
				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}
/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
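/*
 * How the test below works: the asm writes a byte to the read-only
 * FIX_WP_TEST page. If WP is honoured in supervisor mode, the store
 * faults and the __ex_table fixup resumes at label 2, leaving flag at
 * its initial value of 1; if WP is broken, the store succeeds and the
 * xorl clears flag to 0.
 */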
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)addr, 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}
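/*
 * The 0xcc fill above is the x86 int3 opcode: a stale pointer that
 * jumps into freed init code hits a breakpoint trap instead of
 * executing whatever data later reuses the page.
 */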
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif