memory hotplug x86_64: fix section mismatch in init_memory_mapping()
[linux-2.6/mini2440.git] arch/x86/mm/init_64.c
blob 0f9c8c890658043773018ad32e8786953dae8c79
1 /*
2 * linux/arch/x86_64/mm/init.c
4 * Copyright (C) 1995 Linus Torvalds
5 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
6 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
7 */
9 #include <linux/signal.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/types.h>
15 #include <linux/ptrace.h>
16 #include <linux/mman.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19 #include <linux/smp.h>
20 #include <linux/init.h>
21 #include <linux/pagemap.h>
22 #include <linux/bootmem.h>
23 #include <linux/proc_fs.h>
24 #include <linux/pci.h>
25 #include <linux/pfn.h>
26 #include <linux/poison.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/module.h>
29 #include <linux/memory_hotplug.h>
30 #include <linux/nmi.h>
32 #include <asm/processor.h>
33 #include <asm/system.h>
34 #include <asm/uaccess.h>
35 #include <asm/pgtable.h>
36 #include <asm/pgalloc.h>
37 #include <asm/dma.h>
38 #include <asm/fixmap.h>
39 #include <asm/e820.h>
40 #include <asm/apic.h>
41 #include <asm/tlb.h>
42 #include <asm/mmu_context.h>
43 #include <asm/proto.h>
44 #include <asm/smp.h>
45 #include <asm/sections.h>
#ifndef Dprintk
/* Debug print stub: expands to nothing unless a real Dprintk is provided. */
#define Dprintk(x...)
#endif

/* Platform DMA mapping operations; assigned elsewhere (not in this file). */
const struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

/* Pages reserved in the DMA range; forwarded to set_dma_reserve() below. */
static unsigned long dma_reserve __initdata;

/* Per-CPU TLB gather state used by the mmu_gather machinery. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
59 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
60 * physical space so we can cache the place of the first one and move
61 * around without checking the pgd every time.
64 void show_mem(void)
66 long i, total = 0, reserved = 0;
67 long shared = 0, cached = 0;
68 pg_data_t *pgdat;
69 struct page *page;
71 printk(KERN_INFO "Mem-info:\n");
72 show_free_areas();
73 printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
75 for_each_online_pgdat(pgdat) {
76 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
77 /* this loop can take a while with 256 GB and 4k pages
78 so update the NMI watchdog */
79 if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) {
80 touch_nmi_watchdog();
82 if (!pfn_valid(pgdat->node_start_pfn + i))
83 continue;
84 page = pfn_to_page(pgdat->node_start_pfn + i);
85 total++;
86 if (PageReserved(page))
87 reserved++;
88 else if (PageSwapCache(page))
89 cached++;
90 else if (page_count(page))
91 shared += page_count(page) - 1;
94 printk(KERN_INFO "%lu pages of RAM\n", total);
95 printk(KERN_INFO "%lu reserved pages\n",reserved);
96 printk(KERN_INFO "%lu pages shared\n",shared);
97 printk(KERN_INFO "%lu pages swap cached\n",cached);
/*
 * Nonzero once mem_init() has retired the bootmem allocator; the early
 * page-table helpers below switch to GFP allocations when this is set.
 */
int after_bootmem;
/*
 * Get one page-aligned page for building page tables.  Uses GFP_ATOMIC
 * once the page allocator is up, bootmem before that.  Never returns
 * NULL: allocation failure or misalignment is fatal (panic).
 */
static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}
/*
 * Map a single kernel page: install a PTE for @vaddr -> @phys with
 * @prot, allocating any missing PUD/PMD/PTE levels via spp_getpage().
 * The PGD entry must already exist (set up in head.S).  Used by
 * __set_fixmap() below.
 */
static __init void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		/* Sanity check: the table just installed must be the one found. */
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	/* Complain when replacing a live, different mapping. */
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
/*
 * Install a fixmap entry: map the fixed virtual slot @idx to @phys with
 * protection @prot.  Out-of-range indices are logged and ignored.
 */
void __init
__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
/*
 * PFN bounds of the early page-table pool located by
 * find_early_table_space(); alloc_low_page() consumes pages from
 * table_end upward.
 */
unsigned long __meminitdata table_start, table_end;
179 static __meminit void *alloc_low_page(unsigned long *phys)
181 unsigned long pfn = table_end++;
182 void *adr;
184 if (after_bootmem) {
185 adr = (void *)get_zeroed_page(GFP_ATOMIC);
186 *phys = __pa(adr);
187 return adr;
190 if (pfn >= end_pfn)
191 panic("alloc_low_page: ran out of memory");
193 adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
194 memset(adr, 0, PAGE_SIZE);
195 *phys = pfn * PAGE_SIZE;
196 return adr;
199 static __meminit void unmap_low_page(void *adr)
202 if (after_bootmem)
203 return;
205 early_iounmap(adr, PAGE_SIZE);
/* Must run before zap_low_mappings */
/*
 * Early ioremap: map [addr, addr+size) with 2MB PSE entries by finding
 * a run of unused slots in level2_kernel_pgt.  Returns the virtual
 * address inside the __START_KERNEL_map window, or NULL when no free
 * run of sufficient length exists.
 */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd, *last_pmd;
	int i, pmds;

	/* Number of 2MB entries needed to cover the (unaligned) range. */
	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto next;
		}
		/* Found a free window: keep addr's sub-2MB offset in vaddr. */
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;
		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
		__flush_tlb();
		return (void *)vaddr;
	next:
		;
	}
	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
	return NULL;
}
/* To avoid virtual aliases later */
/*
 * Tear down a mapping made by early_ioremap(): clear the covering PMD
 * entries in level2_kernel_pgt and flush the TLB.
 */
__meminit void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);
	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);
	__flush_tlb();
}
/*
 * Fill a PMD page with 2MB mappings for the physical range
 * [address, end).  At boot, remaining entries past @end are cleared;
 * entries already populated are skipped (memory-hotplug case).
 */
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		/* 2MB global kernel mapping, masked to supported PTE bits. */
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}
/*
 * Extend an already-present PUD entry's PMD table to cover
 * [address, end), under init_mm's page-table lock.
 */
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud,0);

	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}
/*
 * Populate a PUD page for the physical range [addr, end): allocate a
 * PMD table per PUD-sized region and fill it with 2MB mappings.
 * Regions with nothing in the e820 map are cleared at boot;
 * already-mapped PUDs are updated in place.
 */
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		/* No RAM here at boot: make sure the slot is empty. */
		if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		/* Already mapped (hotplug): extend the existing PMD table. */
		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(pmd);
	}
	__flush_tlb();
}
/*
 * Reserve physical memory for the kernel direct-mapping page tables:
 * count the PUD and PMD entries needed up to @end, round each level to
 * whole pages, and claim a free e820 area starting at 0x8000.  Results
 * are published in table_start/table_end (as PFNs).
 */
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
/*
 * Marked __init_refok: this is also reached after boot from
 * arch_add_memory() (memory hotplug), where the after_bootmem branches
 * below avoid the boot-time allocators, so the reference into __init
 * code is deliberate -- this annotation is what silences the modpost
 * section-mismatch warning.
 */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		/* After boot the PGD slot already exists; at boot a fresh
		   PUD page is allocated and hooked up below. */
		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	}

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();
}
390 #ifndef CONFIG_NUMA
/*
 * Flat (non-NUMA) zone setup: DMA up to MAX_DMA_PFN, DMA32 up to
 * MAX_DMA32_PFN, everything else NORMAL; then hand off to the core
 * free-area initialisation.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
403 #endif
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		/* Only whole 2MB (PSE) mappings are expected here; a split
		   mapping would leak its PTE page when cleared. */
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
		"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}
/*
 * Memory hotplug specific functions
 */
/*
 * Hand a hot-added (reserved) page over to the buddy allocator and
 * account for it in the global page counters.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
451 #ifdef CONFIG_MEMORY_HOTPLUG
453 * Memory is added always to NORMAL zone. This means you will never get
454 * additional DMA/DMA32 memory.
456 int arch_add_memory(int nid, u64 start, u64 size)
458 struct pglist_data *pgdat = NODE_DATA(nid);
459 struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
460 unsigned long start_pfn = start >> PAGE_SHIFT;
461 unsigned long nr_pages = size >> PAGE_SHIFT;
462 int ret;
464 init_memory_mapping(start, (start + size -1));
466 ret = __add_pages(zone, start_pfn, nr_pages);
467 if (ret)
468 goto error;
470 return ret;
471 error:
472 printk("%s: Problem encountered in __add_pages!\n", __func__);
473 return ret;
475 EXPORT_SYMBOL_GPL(arch_add_memory);
477 #if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
/*
 * Without ACPI SRAT data there is no physical-address -> node mapping
 * available, so every hot-added range is attributed to node 0.
 */
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
483 #endif
485 #endif /* CONFIG_MEMORY_HOTPLUG */
487 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
 * just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;

	/* err stays -EIO unless at least one valid pfn was onlined. */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
513 #endif
/* /proc/kcore segment descriptors, registered in mem_init(). */
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;
/*
 * Late memory init: release bootmem to the buddy allocator, account
 * reserved pages, register /proc/kcore segments and print the memory
 * summary.  Also flips after_bootmem, switching the page-table helpers
 * above over to GFP allocations.
 */
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
					absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
				 VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}
/*
 * Free the pages covering [begin, end): poison them, drop the Reserved
 * bit, and hand them back to the allocator.  Pages inside the kernel
 * image mapping additionally get their page attributes cleared, with
 * the required global TLB flush issued afterwards.
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		/* Poison the freed memory to catch late users. */
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		if (addr >= __START_KERNEL_map)
			change_page_attr_addr(addr, 1, __pgprot(0));
		free_page(addr);
		totalram_pages++;
	}
	/* Only needed when change_page_attr_addr() was used above. */
	if (addr > __START_KERNEL_map)
		global_flush_tlb();
}
/* Release the __init sections once boot is complete. */
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
591 #ifdef CONFIG_DEBUG_RODATA
/*
 * Write-protect the kernel's read-only data -- and the text too,
 * unless SMP alternatives or kprobes still need to patch it.
 */
void mark_rodata_ro(void)
{
	unsigned long start = (unsigned long)_stext, end;

#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() > 1)
		start = (unsigned long)_etext;
#endif

#ifdef CONFIG_KPROBES
	/* Kprobes patches text at runtime: protect only rodata. */
	start = (unsigned long)__start_rodata;
#endif

	end = (unsigned long)__end_rodata;
	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
	end &= PAGE_MASK;
	if (end <= start)
		return;

	change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
626 #endif
628 #ifdef CONFIG_BLK_DEV_INITRD
/* Release the pages that held the initial ramdisk. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
633 #endif
/*
 * Mark [phys, phys+len) reserved in bootmem.  Ranges above end_pfn but
 * still inside the e820 map are silently ignored (kdump kernels poking
 * firmware tables).  Reservations below the DMA limit are additionally
 * reported via set_dma_reserve() for zone watermark accounting.
 */
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= end_pfn) {
		/* This can happen with kdump kernels when accessing firmware
		   tables. */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
				phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}
/*
 * Report whether @addr is a mapped kernel virtual address by walking
 * the kernel page tables down to the PTE (or large PMD) and checking
 * that the backing pfn is valid.  Non-canonical addresses yield 0.
 */
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Canonical addresses have all upper bits equal (all 0 or all 1). */
	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	/* 2MB page: the PMD itself is the leaf entry. */
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */
static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
	.vm_page_prot = PAGE_READONLY_EXEC,
	.vm_flags = VM_READ | VM_EXEC
	/* all other fields (including vm_mm) remain implicitly zero */
};
/*
 * Return the vsyscall pseudo-VMA for @tsk, or NULL for 32-bit compat
 * tasks, which have no 64-bit vsyscall page.
 */
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}
714 int in_gate_area(struct task_struct *task, unsigned long addr)
716 struct vm_area_struct *vma = get_gate_vma(task);
717 if (!vma)
718 return 0;
719 return (addr >= vma->vm_start) && (addr < vma->vm_end);
722 /* Use this when you have no reliable task/vma, typically from interrupt
723 * context. It is less reliable than using the task's vma and may give
724 * false positives.
726 int in_gate_area_no_task(unsigned long addr)
728 return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
731 const char *arch_vma_name(struct vm_area_struct *vma)
733 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
734 return "[vdso]";
735 if (vma == &gate_vma)
736 return "[vsyscall]";
737 return NULL;
740 #ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
int __meminit vmemmap_populate(struct page *start_page,
						unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			pte_t entry;
			/* Back this 2MB of vmemmap with one node-local block. */
			void *p = vmemmap_alloc_block(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
			mk_pte_huge(entry);
			set_pmd(pmd, __pmd(pte_val(entry)));

			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
				addr, addr + PMD_SIZE - 1, p, node);
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	}

	return 0;
}
783 #endif