x86: implement true end_pfn_mapped for 32bit
arch/x86/mm/init_32.c
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long end_pfn_map;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
                page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                if (!page_table) {
                        page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                }

                paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
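
/*
 * A minimal usage sketch (illustrative only, compiled out; not part of
 * the original file): how the two helpers above combine to get the pte
 * page for an arbitrary kernel virtual address. "example_pte_for" is a
 * hypothetical name.
 */
#if 0
static pte_t * __init example_pte_for(pgd_t *pgd_base, unsigned long vaddr)
{
        pgd_t *pgd = pgd_base + pgd_index(vaddr);
        pmd_t *pmd = one_md_table_init(pgd) + pmd_index(vaddr);

        /* Allocates the pte page on first use, then just returns it: */
        return one_page_table_init(pmd);
}
#endif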
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
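
/*
 * Worked example for the index arithmetic above (illustrative, non-PAE
 * figures): PGDIR_SHIFT is 22, so pgd_index() of the usual PAGE_OFFSET
 * of 0xC0000000 is 0xC0000000 >> 22 == 768, and each pass of the inner
 * loop advances vaddr by PMD_SIZE == 4MB - one pte page (1024 entries
 * of 4kB) per pmd slot.
 */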
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;

                for (pmd_idx = 0;
                     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (cpu_has_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                end_pfn_map = pfn;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        for (pte_ofs = 0;
                             pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                set_pte(pte, pfn_pte(pfn, prot));
                        }
                        end_pfn_map = pfn;
                }
        }
}
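
/*
 * Sketch of what end_pfn_map buys us (illustrative; the helper below is
 * hypothetical and compiled out). In the PSE branch above, addr2 is the
 * last byte of the big-page mapping (addr + PTRS_PER_PTE*PAGE_SIZE - 1,
 * i.e. 4MB with non-PAE figures), so the mapping is made executable if
 * either end overlaps kernel text; either way end_pfn_map is advanced
 * to the highest pfn mapped so far.
 */
#if 0
static inline int example_pfn_is_mapped(unsigned long pfn)
{
        /* true once kernel_physical_mapping_init() has covered this pfn */
        return pfn < end_pfn_map;
}
#endif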
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}
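
/*
 * Worked example (illustrative): pfns 0x70000-0x7003F are physical
 * addresses 0x70000000-0x7003ffff, a 64-page (256kB) window at 1.75GB
 * that hits the Pentium Pro RAM erratum checked via ppro_with_ram_bug()
 * in mem_init().
 */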
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
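
/*
 * Size sketch (illustrative, non-PAE figures): the permanent kmap
 * window is LAST_PKMAP * PAGE_SIZE == 1024 * 4kB == 4MB starting at
 * PKMAP_BASE, so it fits in a single pte page - which is why caching
 * just one pointer in pkmap_page_table is enough for kmap()/kunmap().
 */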
static void __meminit free_new_highpage(struct page *page)
{
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                free_new_highpage(page);
        } else
                SetPageReserved(page);
}

static int __meminit
add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
        free_new_highpage(page);
        totalram_pages++;
#ifdef CONFIG_FLATMEM
        max_mapnr = max(pfn, max_mapnr);
#endif
        num_physpages++;

        return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assuming a single node, and that all memory added
 * dynamically and onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
        ClearPageReserved(page);
        add_one_highpage_hotplug(page, page_to_pfn(page));
}
#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;

        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
                /*
                 * Holes under sparsemem might not have mem_map[]:
                 */
                if (pfn_valid(pfn))
                        add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        }
        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()                            do { } while (0)
# define permanent_kmaps_init(pgd_base)         do { } while (0)
# define set_highmem_pages_init(bad_ppro)       do { } while (0)
#endif /* CONFIG_HIGHMEM */
pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
}
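
/*
 * Walk sketch (illustrative): the boot pagetable may cover ptes past
 * max_low_pfn, since early boot maps memory in whole big-page chunks.
 * The loop above clears those ptes one at a time and stops at the first
 * non-present entry, relying on the boot-time mapping being contiguous
 * from PAGE_OFFSET upward.
 */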
void __init native_pagetable_setup_done(pgd_t *base)
{
}
/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        paravirt_pagetable_setup_start(pgd_base);

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}
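
/*
 * Fixmap arithmetic above (illustrative): fixmap slots grow downward
 * from FIXADDR_TOP, so __fix_to_virt(__end_of_fixed_addresses - 1) is
 * the lowest fixmap address; masking with PMD_MASK rounds it down to a
 * pmd boundary so that page_table_range_init() allocates a pte page for
 * every pmd the fixmap region touches.
 */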
#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++) {
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}
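
/*
 * PAE detail for the loop above (illustrative): "1 + __pa(empty_zero_page)"
 * builds a present pgd entry whose pmd page is the all-zero page, so the
 * slot maps nothing while still being a valid top-level entry; on non-PAE
 * a cleared entry (0) does the same job.
 */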
int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif
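
/*
 * Worked example (illustrative): booting with "noexec=off" makes
 * noexec_setup() clear _PAGE_NX from __supported_pte_mask and set
 * disable_nx, so even when CPUID leaf 0x80000001 reports EDX bit 20
 * (NX), set_nx() leaves EFER.NX alone and nx_enabled stays 0.
 */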
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
        pagetable_init();

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        kmap_init();
}
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}
static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR
                        "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                                PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
                                FIXADDR_START);
                BUG();
        }
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );
#if 1 /* double-sanity-check paranoia */
        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE        > FIXADDR_START);
        BUG_ON(VMALLOC_END                              > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                            > VMALLOC_END);
        BUG_ON((unsigned long)high_memory               > VMALLOC_START);
#endif /* double-sanity-check paranoia */
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        cpa_init();

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif
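
/*
 * Usage sketch (illustrative, hypothetical figures): hot-adding 128MB
 * at the 4GB boundary would be arch_add_memory(0, 0x100000000ULL,
 * 0x8000000), i.e. start_pfn = 0x100000 and nr_pages = 0x8000; on
 * 32-bit every hot-added range is placed in ZONE_HIGHMEM, as above.
 */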
/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}
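
/*
 * Control flow of the asm above (illustrative): flag starts at 1 (the
 * "2" (1) input constraint). The write-back at label 1 targets the
 * read-only FIX_WP_TEST page; if WP is honoured, the fault's
 * _ASM_EXTABLE fixup jumps to label 2 with flag still 1, otherwise the
 * write succeeds and "xorl %2, %2" clears flag to 0 - so the return
 * value is exactly wp_works_ok.
 */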
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark it not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above; now that
         * we are going to free part of that, we need to make it
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}
void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif