/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#ifndef Dprintk
#define Dprintk(x...)
#endif

#ifdef CONFIG_GART_IOMMU
extern int swiotlb;
#endif

extern char _stext[];

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */
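/* Print a summary of memory state to the console: free areas, free swap,
   and per-node counts of total/reserved/shared/swap-cached pages. */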
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat->node_mem_map + i;
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end[];
extern char __init_begin, __init_end;

int after_bootmem;
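/* Return a zeroed, page-aligned page for building kernel page tables.
   Comes from the bootmem allocator during early boot and from the page
   allocator (GFP_ATOMIC) once after_bootmem is set. */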
static void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}
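/* Map a single kernel page at 'vaddr' to the physical address 'phys' with
   protection 'prot', allocating any missing intermediate page tables.
   Used below by __set_fixmap() for the fixmap range. */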
static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pml4_t *level4;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	level4 = pml4_offset_k(vaddr);
	if (pml4_none(*level4)) {
		printk("PML4 FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pgd = level3_offset_k(level4, vaddr);
	if (pgd_none(*pgd)) {
		pmd = (pmd_t *) spp_getpage();
		set_pgd(pgd, __pgd(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pgd, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pgd,0));
			return;
		}
	}
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};
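/* Early page-table allocator for init_memory_mapping(): hand out the next
   physical page after table_end and make it accessible through one of the
   temporary 2MB boot PMDs, since the direct mapping does not exist yet. */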
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}
static __init void unmap_low_page(int i)
{
	struct temp_map *ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}
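/* Fill one PGD page with a 2MB-page direct mapping of physical memory in
   the range [address, end), skipping ranges that are not covered by the
   e820 map.  PMD pages are allocated through alloc_low_page(). */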
static void __init phys_pgd_init(pgd_t *pgd, unsigned long address, unsigned long end)
{
	long i, j;

	i = pgd_index(address);
	pgd = pgd + i;
	for (; i < PTRS_PER_PGD; pgd++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PML4_MASK) + i*PGDIR_SIZE;
		if (paddr >= end) {
			for (; i < PTRS_PER_PGD; i++, pgd++)
				set_pgd(pgd, __pgd(0));
			break;
		}

		if (!e820_mapped(paddr, paddr+PGDIR_SIZE, 0)) {
			set_pgd(pgd, __pgd(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		set_pgd(pgd, __pgd(pmd_phys | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end) {
				for (; j < PTRS_PER_PMD; j++, pmd++)
					set_pmd(pmd, __pmd(0));
				break;
			}
			pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
		unmap_low_page(map);
	}
	__flush_tlb();
}
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(void)
{
	unsigned long adr;
	unsigned long end;
	unsigned long next;
	unsigned long pgds, pmds, tables;

	Dprintk("init_memory_mapping\n");

	end = end_pfn_map << PAGE_SHIFT;

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */

	pgds = (end + PGDIR_SIZE - 1) >> PGDIR_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(pgds*8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE);

	table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	end += __PAGE_OFFSET; /* turn virtual */

	for (adr = PAGE_OFFSET; adr < end; adr = next) {
		int map;
		unsigned long pgd_phys;
		pgd_t *pgd = alloc_low_page(&map, &pgd_phys);
		next = adr + PML4_SIZE;
		if (next > end)
			next = end;
		phys_pgd_init(pgd, adr-PAGE_OFFSET, next-PAGE_OFFSET);
		set_pml4(init_level4_pgt + pml4_index(adr), mk_kernel_pml4(pgd_phys));
		unmap_low_page(map);
	}
	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
	early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
		     table_start<<PAGE_SHIFT,
		     table_end<<PAGE_SHIFT);
}
extern struct x8664_pda cpu_pda[NR_CPUS];

static unsigned long low_pml4[NR_CPUS];
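/* Slot 0 of each CPU's PML4 holds an identity mapping of low memory that is
   only needed while bringing CPUs up.  swap_low_mappings() exchanges that
   slot with a saved copy, so one call removes the mapping and a second call
   restores it; zap_low_mappings() below is the "remove" case. */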
void swap_low_mappings(void)
{
	int i;
	for (i = 0; i < NR_CPUS; i++) {
		unsigned long t;
		if (!cpu_pda[i].level4_pgt)
			continue;
		t = cpu_pda[i].level4_pgt[0];
		cpu_pda[i].level4_pgt[0] = low_pml4[i];
		low_pml4[i] = t;
	}
	flush_tlb_all();
}
void zap_low_mappings(void)
{
	swap_low_mappings();
}
#ifndef CONFIG_DISCONTIGMEM
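/* Flat (non-NUMA) zone setup: memory below MAX_DMA_ADDRESS goes into
   ZONE_DMA, everything else into ZONE_NORMAL. */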
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
	unsigned int max_dma;

	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	if (end_pfn < max_dma)
		zones_size[ZONE_DMA] = end_pfn;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = end_pfn - max_dma;
	}
	free_area_init(zones_size);
}
#endif
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pmd_t *pmd;
		if (!pgd || pgd_none(*pgd))
			continue;
		pmd = pmd_offset(pgd, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}
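/* Return 1 if the page frame number falls inside an E820_RAM region. */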
static inline int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long addr, end;

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
extern int swiotlb_force;

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;
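/* Final memory setup: decide whether the software IOTLB is needed, hand all
   bootmem pages over to the page allocator, count reserved pages, register
   /proc/kcore areas and print the usual memory summary. */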
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

#ifdef CONFIG_SWIOTLB
	if (swiotlb_force)
		swiotlb = 1;
	if (!iommu_aperture &&
	    (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
		swiotlb = 1;
	if (swiotlb)
		swiotlb_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_DISCONTIGMEM
	totalram_pages += numa_free_all_bootmem();
	tmp = 0;
	/* should count reserved pages here for all nodes */
#else
	max_mapnr = end_pfn;
	if (!mem_map) BUG();

	totalram_pages += free_all_bootmem();

	for (tmp = 0; tmp < end_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
			reservedpages++;
#endif

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}
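/* Release the pages backing the __init sections back to the page allocator
   once boot is complete. */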
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
#ifdef CONFIG_INIT_DEBUG
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
#endif
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < (unsigned long)&_end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_DISCONTIGMEM
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
}
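/* Walk the kernel page tables to check whether 'addr' is a canonical kernel
   address backed by a valid page; handles 2MB (large) PMD mappings too. */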
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pml4_t *pml4;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pml4 = pml4_offset_k(addr);
	if (pml4_none(*pml4))
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
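/* x86-64 specific sysctls under /proc/sys/debug for exception and (with
   CONFIG_CHECKING) page fault tracing. */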
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#ifdef CONFIG_CHECKING
	{ 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#endif
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif
/* Pseudo VMAs to allow ptrace access for the vsyscall pages.  x86-64 has two
   different ones: one for 32bit and one for 64bit.  Use the appropriate one
   for the target task. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

static struct vm_area_struct gate32_vma = {
	.vm_start = VSYSCALL32_BASE,
	.vm_end = VSYSCALL32_END,
	.vm_page_prot = PAGE_READONLY
};
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32)) {
		/* lookup code assumes the pages are present. set them up
		   now */
		if (__map_syscall32(tsk->mm, 0xfffe000) < 0)
			return NULL;
		return &gate32_vma;
	}
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}