[PATCH] add __[start|end]_rodata sections to asm-generic/sections.h
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] arch/x86_64/mm/init.c
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */
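/* Dump a summary of memory usage and per-page state to the console. */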
void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);
}
int after_bootmem;
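/*
 * Allocate a page for a kernel page table.  Uses the bootmem allocator
 * until mem_init() has run (after_bootmem), the page allocator afterwards.
 */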
static __init void *spp_getpage(void)
{
        void *ptr;
        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}
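/*
 * Install a single kernel mapping of 'phys' at 'vaddr' with protection
 * 'prot', filling in missing intermediate page table levels on the way.
 */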
static __init void set_pte_phys(unsigned long vaddr,
                        unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
        pmd_t *pmd;
        void *address;
        int allocated;
} temp_mappings[] __initdata = {
        { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
        { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
        {}
};
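/*
 * Allocate a page for an early page table.  Before bootmem is available
 * the page comes from the area reserved by find_early_table_space() and
 * is made addressable through one of the temporary 2MB mappings above;
 * *index identifies that mapping for unmap_low_page().
 */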
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
        struct temp_map *ti;
        int i;
        unsigned long pfn = table_end++, paddr;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);
                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");
        for (i = 0; temp_mappings[i].allocated; i++) {
                if (!temp_mappings[i].pmd)
                        panic("alloc_low_page: ran out of temp mappings");
        }
        ti = &temp_mappings[i];
        paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
        set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
        ti->allocated = 1;
        __flush_tlb();
        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
        memset(adr, 0, PAGE_SIZE);
        *index = i;
        *phys = pfn * PAGE_SIZE;
        return adr;
}
static __meminit void unmap_low_page(int i)
{
        struct temp_map *ti;

        if (after_bootmem)
                return;

        ti = &temp_mappings[i];
        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0;
}
/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
        unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

        /* actually usually some more */
        if (size >= LARGE_PAGE_SIZE) {
                printk("SMBIOS area too long %lu\n", size);
                return NULL;
        }
        set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
        map += LARGE_PAGE_SIZE;
        set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
        __flush_tlb();
        return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}
/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
        if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
                printk("early_iounmap: bad address %p\n", addr);
        set_pmd(temp_mappings[0].pmd, __pmd(0));
        set_pmd(temp_mappings[1].pmd, __pmd(0));
        __flush_tlb();
}
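/* Fill one PMD page with 2MB kernel mappings covering [address, end). */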
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
        int i;

        for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
                unsigned long entry;

                if (address >= end) {
                        if (!after_bootmem)
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        break;
                }
                entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
                entry &= __supported_pte_mask;
                set_pmd(pmd, __pmd(entry));
        }
}
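/*
 * Memory hotplug path: populate the PMD page behind an already existing
 * PUD entry if it has not been filled in yet.
 */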
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

        if (pmd_none(*pmd)) {
                spin_lock(&init_mm.page_table_lock);
                phys_pmd_init(pmd, address, end);
                spin_unlock(&init_mm.page_table_lock);
                __flush_tlb_all();
        }
}
static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
        long i = pud_index(address);

        pud = pud + i;

        if (after_bootmem && pud_val(*pud)) {
                phys_pmd_update(pud, address, end);
                return;
        }

        for (; i < PTRS_PER_PUD; pud++, i++) {
                int map;
                unsigned long paddr, pmd_phys;
                pmd_t *pmd;

                paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
                if (paddr >= end)
                        break;

                if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                pmd = alloc_low_page(&map, &pmd_phys);
                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                phys_pmd_init(pmd, paddr, end);
                spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(map);
        }
        __flush_tlb();
}
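/*
 * Estimate how much memory the kernel direct-mapping page tables will
 * need for physical memory up to 'end' and find a contiguous physical
 * area for them via the e820 map.
 */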
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /* RED-PEN putting page tables only on node 0 could
           cause a hotspot and fill up ZONE_DMA. The page tables
           need roughly 0.5KB per GB. */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys;
                pgd_t *pgd = pgd_offset_k(start);
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&map, &pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);
        }

        if (!after_bootmem)
                asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
}
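/*
 * Remove the low identity mapping left over from early boot.  The boot
 * CPU clears the first PGD entry directly; APs simply switch CR3 to
 * init_level4_pgt, which no longer contains the identity mapping.
 */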
void __cpuinit zap_low_mappings(int cpu)
{
        if (cpu == 0) {
                pgd_t *pgd = pgd_offset_k(0UL);
                pgd_clear(pgd);
        } else {
                /*
                 * For AP's, zap the low identity mappings by changing the cr3
                 * to init_level4_pgt and doing local flush tlb all
                 */
                asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
        }
        __flush_tlb_all();
}
/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
           unsigned long start_pfn, unsigned long end_pfn)
{
        int i;
        unsigned long w;

        for (i = 0; i < MAX_NR_ZONES; i++)
                z[i] = 0;

        if (start_pfn < MAX_DMA_PFN)
                z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
        if (start_pfn < MAX_DMA32_PFN) {
                unsigned long dma32_pfn = MAX_DMA32_PFN;
                if (dma32_pfn > end_pfn)
                        dma32_pfn = end_pfn;
                z[ZONE_DMA32] = dma32_pfn - start_pfn;
        }
        z[ZONE_NORMAL] = end_pfn - start_pfn;

        /* Remove lower zones from higher ones. */
        w = 0;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (z[i])
                        z[i] -= w;
                w += z[i];
        }

        /* Compute holes */
        w = start_pfn;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                unsigned long s = w;
                w += z[i];
                h[i] = e820_hole_size(s, w);
        }

        /* Add the space needed for mem_map to the holes too. */
        for (i = 0; i < MAX_NR_ZONES; i++)
                h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

        /* The 16MB DMA zone has the kernel and other misc mappings.
           Account them too */
        if (h[ZONE_DMA]) {
                h[ZONE_DMA] += dma_reserve;
                if (h[ZONE_DMA] >= z[ZONE_DMA]) {
                        printk(KERN_WARNING
                                "Kernel too large and filling up ZONE_DMA?\n");
                        h[ZONE_DMA] = z[ZONE_DMA];
                }
        }
}
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

        memory_present(0, 0, end_pfn);
        sparse_init();
        size_zones(zones, holes, 0, end_pfn);
        free_area_init_node(0, NODE_DATA(0), zones,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
                                "clear_kernel_mapping: mapping has been split. will leak memory\n");
                        pmd_ERROR(*pmd);
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}
/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
 *      via probe interface of sysfs. If acpi notifies hot-add event, then it
 *      can tell node id by searching dsdt. But, probe interface doesn't have
 *      node id. So, return 0 as node id at this time.
 */
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
#endif

/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        ret = __add_pages(zone, start_pfn, nr_pages);
        if (ret)
                goto error;

        init_memory_mapping(start, (start + size - 1));

        return ret;
error:
        printk("%s: Problem encountered in __add_pages!\n", __func__);
        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#else /* CONFIG_MEMORY_HOTPLUG */
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in
 * advance, just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
        int err = -EIO;
        unsigned long pfn;
        unsigned long total = 0, mem = 0;
        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                if (pfn_valid(pfn)) {
                        online_page(pfn_to_page(pfn));
                        err = 0;
                        mem++;
                }
                total++;
        }
        if (!err) {
                z->spanned_pages += total;
                z->present_pages += mem;
                z->zone_pgdat->node_spanned_pages += total;
                z->zone_pgdat->node_present_pages += mem;
        }
        return err;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;
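/*
 * Called once at boot: release all bootmem pages to the page allocator,
 * register the /proc/kcore regions and print the memory summary.
 */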
void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        num_physpages = end_pfn;
        high_memory = (void *) __va(end_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

#ifdef CONFIG_SMP
        /*
         * Sync boot_level4_pgt mappings with the init_level4_pgt
         * except for the low identity mappings which are already zapped
         * in init_level4_pgt. This sync-up is essential for AP's bringup
         */
        memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}
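/*
 * Free the pages in [begin, end) back to the page allocator, poisoning
 * them first so late references to init memory are easier to spot.
 */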
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        if (begin >= end)
                return;

        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
}
void free_initmem(void)
{
        memset(__initdata_begin, POISON_FREE_INITDATA,
                __initdata_end - __initdata_begin);
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
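/*
 * Write-protect the kernel's read-only data, using the generic
 * __start_rodata/__end_rodata section markers from asm-generic/sections.h.
 */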
void mark_rodata_ro(void)
{
        unsigned long addr = (unsigned long)__start_rodata;

        for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

        printk("Write protecting the kernel read-only data: %luk\n",
                (__end_rodata - __start_rodata) >> 10);

        /*
         * change_page_attr_addr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif
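/*
 * Reserve a physical range in the bootmem allocator and, if it lies below
 * the 16MB DMA limit, account it in dma_reserve so size_zones() can treat
 * it as a hole in ZONE_DMA.
 */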
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
                dma_reserve += len / PAGE_SIZE;
}
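/*
 * Check whether a kernel virtual address is canonical and backed by a
 * present page by walking the kernel page tables.
 */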
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
        { 0, }
};

static ctl_table debug_root_table2[] = {
        { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
          .child = debug_table2 },
        { 0 },
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2, 1);
        return 0;
}
__initcall(x8664_sysctl_init);
#endif
/* A pseudo VMA to allow ptrace access to the vsyscall page.  This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
        .vm_end = VSYSCALL_END,
        .vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);
        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context.  It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}