[PATCH] x86_64: Use function pointers to call DMA mapping functions
[linux-2.6/zen-sources.git] arch/x86_64/mm/init.c
blob e93867850a4f3dce9717ad8ecc79b70e660128f0

/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/dma-mapping.h>
#include <asm/swiotlb.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
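
/*
 * Illustration, not part of the original file: per the patch title, the
 * generic DMA mapping helpers dispatch through the dma_ops function-pointer
 * table above instead of calling one IOMMU implementation directly.  A
 * minimal sketch, assuming struct dma_mapping_ops exposes a map_single hook
 * (the real layout lives in asm/dma-mapping.h):
 *
 *      static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
 *                                              size_t size, int direction)
 *      {
 *              return dma_ops->map_single(hwdev, ptr, size, direction);
 *      }
 *
 * Each backend (nommu, swiotlb, the AMD GART IOMMU) fills in its own
 * dma_mapping_ops and points dma_ops at it during boot.
 */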

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

static void *spp_getpage(void)
{
        void *ptr;
        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}

static void set_pte_phys(unsigned long vaddr,
                         unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}
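
/*
 * Illustration, not part of the original file: __set_fixmap() is normally
 * reached through the set_fixmap()/set_fixmap_nocache() wrappers in
 * asm/fixmap.h, which supply the page protection.  A typical (sketched) use
 * from arch setup code of this era is mapping the local APIC registers at
 * their fixed virtual slot during boot:
 *
 *      set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
 *
 * fix_to_virt(FIX_APIC_BASE) then yields the virtual address behind that slot.
 */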

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
        pmd_t *pmd;
        void *address;
        int allocated;
} temp_mappings[] __initdata = {
        { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
        { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
        {}
};

static __init void *alloc_low_page(int *index, unsigned long *phys)
{
        struct temp_map *ti;
        int i;
        unsigned long pfn = table_end++, paddr;
        void *adr;

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");
        for (i = 0; temp_mappings[i].allocated; i++) {
                if (!temp_mappings[i].pmd)
                        panic("alloc_low_page: ran out of temp mappings");
        }
        ti = &temp_mappings[i];
        paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
        set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
        ti->allocated = 1;
        __flush_tlb();
        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
        *index = i;
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __init void unmap_low_page(int i)
{
        struct temp_map *ti = &temp_mappings[i];
        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0;
}

static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
        long i, j;

        i = pud_index(address);
        pud = pud + i;
        for (; i < PTRS_PER_PUD; pud++, i++) {
                int map;
                unsigned long paddr, pmd_phys;
                pmd_t *pmd;

                paddr = address + i*PUD_SIZE;
                if (paddr >= end) {
                        for (; i < PTRS_PER_PUD; i++, pud++)
                                set_pud(pud, __pud(0));
                        break;
                }

                if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                pmd = alloc_low_page(&map, &pmd_phys);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
                        unsigned long pe;

                        if (paddr >= end) {
                                for (; j < PTRS_PER_PMD; j++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                                break;
                        }
                        pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
                        pe &= __supported_pte_mask;
                        set_pmd(pmd, __pmd(pe));
                }
                unmap_low_page(map);
        }
        __flush_tlb();
}

static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the memory
         * mapped. Unfortunately this is done currently before the nodes are
         * discovered.
         */
        find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys;
                pud_t *pud = alloc_low_page(&map, &pud_phys);
                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);
        }

        asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
        early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
                     table_start<<PAGE_SHIFT,
                     table_end<<PAGE_SHIFT);
}

void __cpuinit zap_low_mappings(int cpu)
{
        if (cpu == 0) {
                pgd_t *pgd = pgd_offset_k(0UL);
                pgd_clear(pgd);
        } else {
                /*
                 * For AP's, zap the low identity mappings by changing the cr3
                 * to init_level4_pgt and doing local flush tlb all
                 */
                asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
        }
        __flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
           unsigned long start_pfn, unsigned long end_pfn)
{
        int i;
        unsigned long w;

        for (i = 0; i < MAX_NR_ZONES; i++)
                z[i] = 0;

        if (start_pfn < MAX_DMA_PFN)
                z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
        if (start_pfn < MAX_DMA32_PFN) {
                unsigned long dma32_pfn = MAX_DMA32_PFN;
                if (dma32_pfn > end_pfn)
                        dma32_pfn = end_pfn;
                z[ZONE_DMA32] = dma32_pfn - start_pfn;
        }
        z[ZONE_NORMAL] = end_pfn - start_pfn;

        /* Remove lower zones from higher ones. */
        w = 0;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (z[i])
                        z[i] -= w;
                w += z[i];
        }

        /* Compute holes */
        w = start_pfn;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                unsigned long s = w;
                w += z[i];
                h[i] = e820_hole_size(s, w);
        }

        /* Add the space needed for mem_map to the holes too. */
        for (i = 0; i < MAX_NR_ZONES; i++)
                h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

        /* The 16MB DMA zone has the kernel and other misc mappings.
           Account them too */
        if (h[ZONE_DMA]) {
                h[ZONE_DMA] += dma_reserve;
                if (h[ZONE_DMA] >= z[ZONE_DMA]) {
                        printk(KERN_WARNING
                                "Kernel too large and filling up ZONE_DMA?\n");
                        h[ZONE_DMA] = z[ZONE_DMA];
                }
        }
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

        size_zones(zones, holes, 0, end_pfn);
        free_area_init_node(0, NODE_DATA(0), zones,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
                               "clear_kernel_mapping: mapping has been split. will leak memory\n");
                        pmd_ERROR(*pmd);
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
        no_iommu_init();

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        num_physpages = end_pfn;
        high_memory = (void *) __va(end_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

#ifdef CONFIG_SMP
        /*
         * Sync boot_level4_pgt mappings with the init_level4_pgt
         * except for the low identity mappings which are already zapped
         * in init_level4_pgt. This sync-up is essential for AP's bringup
         */
        memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
        printk("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
extern char __start_rodata, __end_rodata;

void mark_rodata_ro(void)
{
        unsigned long addr = (unsigned long)&__start_rodata;

        for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

        printk("Write protecting the kernel read-only data: %luk\n",
               (&__end_rodata - &__start_rodata) >> 10);

        /*
         * change_page_attr_addr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < (unsigned long)&_end)
                return;
        printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
                dma_reserve += len / PAGE_SIZE;
}

int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
        { 0, }
};

static ctl_table debug_root_table2[] = {
        { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
          .child = debug_table2 },
        { 0 },
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2, 1);
        return 0;
}
__initcall(x8664_sysctl_init);
#endif
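
/*
 * Illustration, not part of the original file: the tables above register a
 * single integer knob under the "debug" sysctl directory, which with procfs
 * enabled should appear as /proc/sys/debug/exception-trace and can be
 * toggled from user space, e.g.:
 *
 *      echo 0 > /proc/sys/debug/exception-trace
 *      cat /proc/sys/debug/exception-trace
 */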

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
        .vm_end = VSYSCALL_END,
        .vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);
        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}