x86: move page_is_ram() function
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] arch/x86/mm/init_32.c
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
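
/*
 * Address space reserved at the top of low memory for vmalloc and
 * ioremap mappings; 128 MB by default.
 */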
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		if (pmd_table != pmd_offset(pud, 0))
			BUG();
	}
#endif
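	/*
	 * With !CONFIG_X86_PAE the pud and pmd levels are folded into the
	 * pgd, so the pud_offset()/pmd_offset() calls below don't descend
	 * anywhere: they hand back the pgd entry itself, reinterpreted.
	 */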
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;
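
		/*
		 * With CONFIG_DEBUG_PAGEALLOC, try alloc_bootmem_pages()
		 * first; if that returns NULL (or DEBUG_PAGEALLOC is off),
		 * fall back to a low bootmem page below.
		 */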
#ifdef CONFIG_DEBUG_PAGEALLOC
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table)
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (cpu_has_pse) {
				unsigned int address2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;
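
				/*
				 * address2 is the last byte covered by this
				 * big page; if either end of the page lands
				 * in kernel text, map the whole page
				 * executable.
				 */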
				if (is_kernel_text(address) ||
				    is_kernel_text(address2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				for (pte_ofs = 0;
				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = PAGE_KERNEL;

					if (is_kernel_text(address))
						prot = PAGE_KERNEL_EXEC;

					set_pte(pte, pfn_pte(pfn, prot));
				}
			}
		}
	}
}

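/*
 * pfns 0x70000-0x7003f cover physical addresses 0x70000000-0x7003ffff,
 * the window that the Pentium Pro RAM erratum handled by
 * ppro_with_ram_bug() can corrupt; such pages are kept reserved below.
 */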
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
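
/*
 * kmap_get_fixmap_pte() simply walks init_mm's page tables by hand
 * (pgd -> pud -> pmd -> pte) for a fixmap address; pagetable_init() has
 * already created the page tables covering the fixmap range.
 */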

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assume a single node, and that all memory added dynamically and
 * onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might not have mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init entries of the first-level page table to the
	 * zero page, if they haven't already been set up.
	 *
	 * In a normal native boot, we'll be running on a
	 * pagetable rooted in swapper_pg_dir, but not in PAE
	 * mode, so this will end up clobbering the mappings
	 * for the lower 24Mbytes of the address space,
	 * without affecting the kernel address space.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		set_pgd(&base[i],
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

	/* Make sure kernel address space is empty so that a pagetable
	   will be allocated for it. */
	memset(&base[USER_PTRS_PER_PGD], 0,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE). The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	unsigned long vaddr, end;
	pgd_t *pgd_base = swapper_pg_dir;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
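	/*
	 * vaddr is the lowest fixmap address rounded down to a PMD
	 * boundary, end is FIXADDR_TOP rounded up to one, so every
	 * fixmap slot ends up with a page table.
	 */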
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
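	/*
	 * For PAE, point each entry at the empty zero page with the
	 * low _PAGE_PRESENT bit set (the literal 1 below) instead of
	 * clearing it outright; for non-PAE a zero entry is fine.
	 */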
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

int nx_enabled = 0;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata = 0;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else if (!strcmp(str, "off")) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	} else
		return -EINVAL;

	return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
	unsigned int v[4], l, h;
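
	/*
	 * CPUID leaf 0x80000001: bit 20 of EDX (v[3]) advertises NX
	 * support. If it is set and "noexec=off" was not given, enable
	 * EFER.NX and allow _PAGE_NX in page table entries.
	 */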
	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
				FIXADDR_START);
		BUG();
	}
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
	       FIXADDR_START, FIXADDR_TOP,
	       (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
	       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
	       (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

	       VMALLOC_START, VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (unsigned long)__va(0), (unsigned long)high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

	       (unsigned long)&_etext, (unsigned long)&_edata,
	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

	       (unsigned long)&_text, (unsigned long)&_etext,
	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif

struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1)
		pmd_cache = kmem_cache_create("pmd",
					      PTRS_PER_PMD*sizeof(pmd_t),
					      PTRS_PER_PMD*sizeof(pmd_t),
					      SLAB_PANIC,
					      pmd_ctor);
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;
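
	/*
	 * flag starts out as 1 (the "2" (1) input constraint below). The
	 * write at label 1 targets the read-only FIX_WP_TEST page: if the
	 * CPU honours WP in supervisor mode it faults, and the exception
	 * table entry resumes at label 2, skipping the xorl that would
	 * clear flag. Returning 1 therefore means the WP bit works.
	 */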
	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
		printk("Write protecting the kernel text: %luk\n", size >> 10);

#ifdef CONFIG_CPA_DEBUG
		global_flush_tlb();

		printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
		global_flush_tlb();

		printk("Testing CPA: write protecting again\n");
		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
		global_flush_tlb();
#endif
	}
#endif
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk("Write protecting the kernel read-only data: %luk\n",
	       size >> 10);

	/*
	 * set_pages_*() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();

#ifdef CONFIG_CPA_DEBUG
	printk("Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
	global_flush_tlb();

	printk("Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	global_flush_tlb();
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif